--- /dev/null
+building liblfds
+================
+
+Windows (user-mode)
+===================
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition
+ to load "liblfds.sln".
+
+2. Use Microsoft Windows SDK and GNUmake to run "makefile.windows" (obviously
+   you'll need to have run the appropriate vcvars*.bat first; you can build
+   for IA64, x64 and x86 - just run the correct vcvars batch file).
+
+   Targets are "librel", "libdbg", "dllrel", "dlldbg" and "clean". You need
+   to clean when switching between targets.
+
+Windows (kernel)
+================
+Use the Windows Driver Kit "build" command. Prior to running "build",
+if you wish to build a static library, run the batch file
+"runme_before_win_kernel_static_lib_build.bat"; if you wish to
+build a dynamic library, instead run "runme_before_win_kernel_dynamic_lib_build.bat".
+
+The Windows kernel build system is rather limited; rather than messing
+up the directory/file structure just for the Windows kernel platform,
+I've instead arranged for these batch files to do the necessary work
+so that "build" will work.
+
+The batch files are idempotent; you can run them as often as you
+like, in any order, at any time (before or after builds), and they'll
+do the right thing.
+
+Linux
+=====
+Use GNUmake to run "makefile.linux". Targets are "arrel", "ardbg",
+"sorel", "sodbg" and "clean". You need to clean when switching
+between targets.
+
+
--- /dev/null
+DIRS = src
+
--- /dev/null
+#ifndef __LIBLFDS600_H
+
+ /***** library header *****/
+ #define LFDS600_RELEASE_NUMBER 1
+
+
+
+
+ /***** lfds600_abstraction *****/
+
+ /***** defines *****/
+ #if (defined _WIN64 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ // TRD : 64-bit Windows user-mode with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <windows.h>
+ #include <intrin.h>
+ typedef unsigned __int64 lfds600_atom_t;
+ #define LFDS600_INLINE extern __forceinline
+ #define LFDS600_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 8
+ #define LFDS600_ALIGN_DOUBLE_POINTER 16
+ #endif
+
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ // TRD : 32-bit Windows user-mode with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <windows.h>
+ #include <intrin.h>
+ typedef unsigned long int lfds600_atom_t;
+ #define LFDS600_INLINE extern __forceinline
+ #define LFDS600_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 4
+ #define LFDS600_ALIGN_DOUBLE_POINTER 8
+
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)
+ #endif
+
+ #if (defined _WIN64 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ // TRD : 64-bit Windows kernel with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <wdm.h>
+ typedef unsigned __int64 lfds600_atom_t;
+ #define LFDS600_INLINE extern __forceinline
+ #define LFDS600_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 8
+ #define LFDS600_ALIGN_DOUBLE_POINTER 16
+ #endif
+
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ // TRD : 32-bit Windows kernel with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <wdm.h>
+ typedef unsigned long int lfds600_atom_t;
+ #define LFDS600_INLINE extern __forceinline
+ #define LFDS600_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 4
+ #define LFDS600_ALIGN_DOUBLE_POINTER 8
+
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)
+ #endif
+
+ #if (defined __unix__ && defined __x86_64__ && __GNUC__)
+ // TRD : any UNIX with GCC on x64
+ #define _XOPEN_SOURCE 600
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long long int lfds600_atom_t;
+ #define LFDS600_INLINE inline
+ #define LFDS600_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 8
+ #define LFDS600_ALIGN_DOUBLE_POINTER 16
+ #endif
+
+ #if (defined __unix__ && defined __i686__ && __GNUC__)
+ // TRD : any UNIX with GCC on x86
+ #define _XOPEN_SOURCE 600
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long int lfds600_atom_t;
+ #define LFDS600_INLINE inline
+ #define LFDS600_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 4
+ #define LFDS600_ALIGN_DOUBLE_POINTER 8
+ #endif
+
+ #if (defined __unix__ && defined __arm__ && __GNUC__)
+ // TRD : any UNIX with GCC on ARM
+ #define _XOPEN_SOURCE 600
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long int lfds600_atom_t;
+ #define LFDS600_INLINE inline
+ #define LFDS600_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS600_ALIGN_SINGLE_POINTER 4
+ #define LFDS600_ALIGN_DOUBLE_POINTER 8
+ #endif
+
+ /***** enums *****/
+ enum data_structure_validity
+ {
+ VALIDITY_VALID,
+ VALIDITY_INVALID_LOOP,
+ VALIDITY_INVALID_MISSING_ELEMENTS,
+ VALIDITY_INVALID_ADDITIONAL_ELEMENTS,
+ VALIDITY_INVALID_TEST_DATA
+ };
+
+ /***** structs *****/
+ struct lfds600_validation_info
+ {
+ lfds600_atom_t
+ min_elements,
+ max_elements;
+ };
+
+ /***** public prototypes *****/
+ void lfds600_abstraction_aligned_free( void *memory );
+ void *lfds600_abstraction_aligned_malloc( size_t size, size_t align_in_bytes );
+ lfds600_atom_t lfds600_abstraction_cas( volatile lfds600_atom_t *destination, lfds600_atom_t exchange, lfds600_atom_t compare );
+ unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare );
+ lfds600_atom_t lfds600_abstraction_increment( lfds600_atom_t *value );
+
+
+
+
+
+ /***** lfds600_freelist *****/
+
+ /***** enums *****/
+ enum lfds600_freelist_query_type
+ {
+ LFDS600_FREELIST_QUERY_ELEMENT_COUNT,
+ LFDS600_FREELIST_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds600_freelist_state;
+ struct lfds600_freelist_element;
+
+ /***** public prototypes *****/
+ int lfds600_freelist_new( struct lfds600_freelist_state **fs, lfds600_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );
+ void lfds600_freelist_delete( struct lfds600_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ lfds600_atom_t lfds600_freelist_new_elements( struct lfds600_freelist_state *fs, lfds600_atom_t number_elements );
+
+ struct lfds600_freelist_element *lfds600_freelist_pop( struct lfds600_freelist_state *fs, struct lfds600_freelist_element **fe );
+ struct lfds600_freelist_element *lfds600_freelist_guaranteed_pop( struct lfds600_freelist_state *fs, struct lfds600_freelist_element **fe );
+ void lfds600_freelist_push( struct lfds600_freelist_state *fs, struct lfds600_freelist_element *fe );
+
+ void *lfds600_freelist_get_user_data_from_element( struct lfds600_freelist_element *fe, void **user_data );
+ void lfds600_freelist_set_user_data_in_element( struct lfds600_freelist_element *fe, void *user_data );
+
+ void lfds600_freelist_query( struct lfds600_freelist_state *fs, enum lfds600_freelist_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ /***** lfds600_queue *****/
+
+ /***** enums *****/
+ enum lfds600_queue_query_type
+ {
+ LFDS600_QUEUE_QUERY_ELEMENT_COUNT,
+ LFDS600_QUEUE_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds600_queue_state;
+
+ /***** public prototypes *****/
+ int lfds600_queue_new( struct lfds600_queue_state **sq, lfds600_atom_t number_elements );
+ void lfds600_queue_delete( struct lfds600_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ int lfds600_queue_enqueue( struct lfds600_queue_state *qs, void *user_data );
+ int lfds600_queue_guaranteed_enqueue( struct lfds600_queue_state *qs, void *user_data );
+ int lfds600_queue_dequeue( struct lfds600_queue_state *qs, void **user_data );
+
+ void lfds600_queue_query( struct lfds600_queue_state *qs, enum lfds600_queue_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ /***** lfds600_ringbuffer *****/
+
+ /***** enums *****/
+ enum lfds600_ringbuffer_query_type
+ {
+ LFDS600_RINGBUFFER_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds600_ringbuffer_state;
+
+ /***** public prototypes *****/
+ int lfds600_ringbuffer_new( struct lfds600_ringbuffer_state **rs, lfds600_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );
+ void lfds600_ringbuffer_delete( struct lfds600_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ struct lfds600_freelist_element *lfds600_ringbuffer_get_read_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element **fe );
+ struct lfds600_freelist_element *lfds600_ringbuffer_get_write_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element **fe, int *overwrite_flag );
+
+ void lfds600_ringbuffer_put_read_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element *fe );
+ void lfds600_ringbuffer_put_write_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element *fe );
+
+ void lfds600_ringbuffer_query( struct lfds600_ringbuffer_state *rs, enum lfds600_ringbuffer_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ /***** lfds600_slist *****/
+
+ /***** incomplete types *****/
+ struct lfds600_slist_state;
+ struct lfds600_slist_element;
+
+ /***** public prototypes *****/
+ int lfds600_slist_new( struct lfds600_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+ void lfds600_slist_delete( struct lfds600_slist_state *ss );
+
+ struct lfds600_slist_element *lfds600_slist_new_head( struct lfds600_slist_state *ss, void *user_data );
+ struct lfds600_slist_element *lfds600_slist_new_next( struct lfds600_slist_element *se, void *user_data );
+
+ void lfds600_slist_delete_element( struct lfds600_slist_state *ss, struct lfds600_slist_element *se );
+ void lfds600_slist_delete_all_elements( struct lfds600_slist_state *ss );
+
+ int lfds600_slist_get_user_data_from_element( struct lfds600_slist_element *se, void **user_data );
+ int lfds600_slist_set_user_data_in_element( struct lfds600_slist_element *se, void *user_data );
+
+ struct lfds600_slist_element *lfds600_slist_get_head( struct lfds600_slist_state *ss, struct lfds600_slist_element **se );
+ struct lfds600_slist_element *lfds600_slist_get_next( struct lfds600_slist_element *se, struct lfds600_slist_element **next_se );
+ struct lfds600_slist_element *lfds600_slist_get_head_and_then_next( struct lfds600_slist_state *ss, struct lfds600_slist_element **se );
+
+
+
+
+
+ /***** lfds600_stack *****/
+
+ /***** enums *****/
+ enum lfds600_stack_query_type
+ {
+ LFDS600_STACK_QUERY_ELEMENT_COUNT
+ };
+
+ /***** incomplete types *****/
+ struct lfds600_stack_state;
+
+ /***** public prototypes *****/
+ int lfds600_stack_new( struct lfds600_stack_state **ss, lfds600_atom_t number_elements );
+ void lfds600_stack_delete( struct lfds600_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ void lfds600_stack_clear( struct lfds600_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state );
+
+ int lfds600_stack_push( struct lfds600_stack_state *ss, void *user_data );
+ int lfds600_stack_guaranteed_push( struct lfds600_stack_state *ss, void *user_data );
+ int lfds600_stack_pop( struct lfds600_stack_state *ss, void **user_data );
+
+ void lfds600_stack_query( struct lfds600_stack_state *ss, enum lfds600_stack_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ #define __LIBLFDS600_H
+
+#endif
+
--- /dev/null
+EXPORTS
+
+lfds600_freelist_delete = lfds600_freelist_delete @1
+lfds600_freelist_get_user_data_from_element = lfds600_freelist_get_user_data_from_element @2
+lfds600_freelist_guaranteed_pop = lfds600_freelist_guaranteed_pop @3
+lfds600_freelist_new = lfds600_freelist_new @4
+lfds600_freelist_new_elements = lfds600_freelist_new_elements @5
+lfds600_freelist_pop = lfds600_freelist_pop @6
+lfds600_freelist_push = lfds600_freelist_push @7
+lfds600_freelist_query = lfds600_freelist_query @8
+lfds600_freelist_set_user_data_in_element = lfds600_freelist_set_user_data_in_element @9
+
+lfds600_queue_delete = lfds600_queue_delete @10
+lfds600_queue_dequeue = lfds600_queue_dequeue @11
+lfds600_queue_enqueue = lfds600_queue_enqueue @12
+lfds600_queue_guaranteed_enqueue = lfds600_queue_guaranteed_enqueue @13
+lfds600_queue_new = lfds600_queue_new @14
+lfds600_queue_query = lfds600_queue_query @15
+
+lfds600_ringbuffer_delete = lfds600_ringbuffer_delete @16
+lfds600_ringbuffer_get_read_element = lfds600_ringbuffer_get_read_element @17
+lfds600_ringbuffer_get_write_element = lfds600_ringbuffer_get_write_element @18
+lfds600_ringbuffer_new = lfds600_ringbuffer_new @19
+lfds600_ringbuffer_put_read_element = lfds600_ringbuffer_put_read_element @20
+lfds600_ringbuffer_put_write_element = lfds600_ringbuffer_put_write_element @21
+
+lfds600_slist_delete = lfds600_slist_delete @ 22
+lfds600_slist_delete_all_elements = lfds600_slist_delete_all_elements @ 23
+lfds600_slist_delete_element = lfds600_slist_delete_element @ 24
+lfds600_slist_get_head = lfds600_slist_get_head @ 25
+lfds600_slist_get_head_and_then_next = lfds600_slist_get_head_and_then_next @ 26
+lfds600_slist_get_next = lfds600_slist_get_next @ 27
+lfds600_slist_get_user_data_from_element = lfds600_slist_get_user_data_from_element @ 28
+lfds600_slist_new = lfds600_slist_new @ 29
+lfds600_slist_new_head = lfds600_slist_new_head @ 30
+lfds600_slist_new_next = lfds600_slist_new_next @ 31
+lfds600_slist_set_user_data_in_element = lfds600_slist_set_user_data_in_element @ 32
+
+lfds600_stack_clear = lfds600_stack_clear @33
+lfds600_stack_delete = lfds600_stack_delete @34
+lfds600_stack_guaranteed_push = lfds600_stack_guaranteed_push @35
+lfds600_stack_new = lfds600_stack_new @36
+lfds600_stack_pop = lfds600_stack_pop @37
+lfds600_stack_push = lfds600_stack_push @38
+lfds600_stack_query = lfds600_stack_query @39
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds600", "liblfds600.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="liblfds600"\r
+ ProjectGUID="{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+ RootNamespace="liblfds"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds600.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds600.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds600.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds600.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="inc"\r
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"\r
+ UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"\r
+ >\r
+ <File\r
+ RelativePath=".\inc\abstraction.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\freelist.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\liblfds600.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\queue.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\ringbuffer.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\stack.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="src"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_internal.h"\r
+ >\r
+ </File>\r
+ <Filter\r
+ Name="lfds600_abstraction"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_abstraction\lfds600_abstraction_aligned_free.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_abstraction\lfds600_abstraction_aligned_malloc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_abstraction\lfds600_abstraction_cas.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_abstraction\lfds600_abstraction_dcas.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_abstraction\lfds600_abstraction_increment.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_abstraction\lfds600_abstraction_internal.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds600_freelist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_freelist\lfds600_freelist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_freelist\lfds600_freelist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_freelist\lfds600_freelist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_freelist\lfds600_freelist_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_freelist\lfds600_freelist_pop_push.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_freelist\lfds600_freelist_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds600_queue"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_queue\lfds600_queue_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_queue\lfds600_queue_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_queue\lfds600_queue_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_queue\lfds600_queue_query.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_queue\lfds600_queue_queue.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds600_ringbuffer"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_ringbuffer\lfds600_ringbuffer_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_ringbuffer\lfds600_ringbuffer_get_and_put.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_ringbuffer\lfds600_ringbuffer_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_ringbuffer\lfds600_ringbuffer_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_ringbuffer\lfds600_ringbuffer_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds600_slist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_slist\lfds600_slist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_slist\lfds600_slist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_slist\lfds600_slist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_slist\lfds600_slist_link.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_slist\lfds600_slist_new.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds600_stack"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds600_stack\lfds600_stack_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_stack\lfds600_stack_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_stack\lfds600_stack_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_stack\lfds600_stack_push_pop.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds600_stack\lfds600_stack_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = inc
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>/dev/null 2>/dev/null
+
+##### sources, objects and libraries #####
+BINNAME = liblfds600
+AR_BINARY = $(BINDIR)/$(BINNAME).a
+SO_BINARY = $(BINDIR)/$(BINNAME).so
+SRCDIRS = lfds600_abstraction lfds600_freelist lfds600_queue lfds600_ringbuffer lfds600_slist lfds600_stack
+# TRD : be aware - in the linux makefile, with the one-pass linking behaviour of the GNU linker, the order
+# of source files matters! this is because it leads to the ordering of objects in the library and
+# that in turn, since the data structures all use the freelist API and the abstraction API, has to be
+# correct
+SOURCES = lfds600_queue_delete.c lfds600_queue_new.c lfds600_queue_query.c lfds600_queue_queue.c \
+ lfds600_ringbuffer_delete.c lfds600_ringbuffer_get_and_put.c lfds600_ringbuffer_new.c lfds600_ringbuffer_query.c \
+ lfds600_slist_delete.c lfds600_slist_get_and_set.c lfds600_slist_link.c lfds600_slist_new.c \
+ lfds600_stack_delete.c lfds600_stack_new.c lfds600_stack_push_pop.c lfds600_stack_query.c \
+ lfds600_freelist_delete.c lfds600_freelist_get_and_set.c lfds600_freelist_new.c lfds600_freelist_query.c lfds600_freelist_pop_push.c \
+ lfds600_abstraction_aligned_free.c lfds600_abstraction_aligned_malloc.c lfds600_abstraction_cas.c lfds600_abstraction_dcas.c lfds600_abstraction_increment.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+DEPENDS = $(patsubst %.c,$(OBJDIR)/%.d,$(notdir $(SOURCES)))
+
+##### CPU variants #####
+GCCARCH = $(shell uname -m)
+
+ifeq ($(GCCARCH),x86_64)
+ GCCARCH = core2
+endif
+
+ifeq ($(findstring arm,$(GCCARCH)),arm)
+ GCCARCH = armv6k
+endif
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+DG = gcc
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)"
+
+CC = gcc
+CBASE = -Wall -Wno-unknown-pragmas -std=c99 -march=$(GCCARCH) -c -I"$(SRCDIR)" -I"$(INCDIR)"
+CFREL = -O2 -finline-functions -Wno-strict-aliasing
+CFDBG = -O0 -g
+
+AR = ar
+AFLAGS = -rcs
+
+LD = gcc
+LFBASE = -Wall -std=c99 -shared
+LFREL = -O2 -s
+LFDBG = -O0 -g
+
+##### rel/dbg .a/.so variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CBASE := $(CBASE) -fpic
+endif
+
+CFLAGS = $(CBASE) $(CFDBG)
+LFLAGS = $(LFBASE) $(LFDBG)
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS = $(CBASE) $(CFREL)
+ LFLAGS = $(LFBASE) $(LFREL)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS) -o $@ $<
+
+##### explicit rules #####
+$(AR_BINARY) : $(OBJECTS)
+ $(AR) $(AFLAGS) $(AR_BINARY) $(OBJECTS)
+
+$(SO_BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(SYSLIBS) $(OBJECTS) -o $(SO_BINARY)
+
+##### phony #####
+.PHONY : clean arrel ardbg sorel sodbg
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME).* $(OBJDIR)/*.o $(OBJDIR)/*.d
+
+arrel : $(AR_BINARY)
+ardbg : $(AR_BINARY)
+
+sorel : $(SO_BINARY)
+sodbg : $(SO_BINARY)
+
+##### dependencies #####
+-include $(DEPENDS)
+
+##### notes #####
+# TRD : we use -std=c99 purely to permit C++ style comments
+
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = inc
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>nul 2>nul
+
+##### sources, objects and libraries #####
+BINNAME = liblfds600
+LIB_BINARY = $(BINDIR)\$(BINNAME).lib
+DLL_BINARY = $(BINDIR)\$(BINNAME).dll
+SRCDIRS = lfds600_abstraction lfds600_freelist lfds600_queue lfds600_ringbuffer lfds600_slist lfds600_stack
+SOURCES = lfds600_abstraction_aligned_free.c lfds600_abstraction_aligned_malloc.c lfds600_abstraction_cas.c lfds600_abstraction_dcas.c lfds600_abstraction_increment.c \
+ lfds600_freelist_delete.c lfds600_freelist_get_and_set.c lfds600_freelist_new.c lfds600_freelist_query.c lfds600_freelist_pop_push.c \
+ lfds600_queue_delete.c lfds600_queue_new.c lfds600_queue_query.c lfds600_queue_queue.c \
+ lfds600_ringbuffer_delete.c lfds600_ringbuffer_get_and_put.c lfds600_ringbuffer_new.c lfds600_ringbuffer_query.c \
+ lfds600_slist_delete.c lfds600_slist_get_and_set.c lfds600_slist_link.c lfds600_slist_new.c \
+ lfds600_stack_delete.c lfds600_stack_new.c lfds600_stack_push_pop.c lfds600_stack_query.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+CC = cl
+CBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /DUNICODE /D_UNICODE /DWIN32_LEAN_AND_MEAN
+CFREL = /Ox /DNDEBUG
+CFDBG = /Od /Gm /Zi /D_DEBUG
+
+AR = lib
+AFLAGS = /nologo /subsystem:console /wx /verbose
+
+LD = link
+LFBASE = /dll /def:$(BINNAME).def /nologo /subsystem:console /wx /nodefaultlib /nxcompat
+LFREL = /incremental:no
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+
+##### variants #####
+CFLAGS = $(CBASE) $(CFDBG) /MTd
+LFLAGS = $(LFBASE) $(LFDBG)
+CLIB = libcmtd.lib
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS = $(CBASE) $(CFREL) /MT
+ LFLAGS = $(LFBASE) $(LFREL)
+ CLIB = libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS = $(CBASE) $(CFDBG) /MTd
+ LFLAGS = $(LFBASE) $(LFDBG)
+ CLIB = libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS = $(CBASE) $(CFREL) /MD
+ LFLAGS = $(LFBASE) $(LFREL)
+ CLIB = msvcrt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS = $(CBASE) $(CFDBG) /MDd
+ LFLAGS = $(LFBASE) $(LFDBG)
+ CLIB = msvcrtd.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(AFLAGS) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(CLIB) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+librel : $(LIB_BINARY)
+libdbg : $(LIB_BINARY)
+
+dllrel : $(DLL_BINARY)
+dlldbg : $(DLL_BINARY)
+
--- /dev/null
+introduction
+============
+Welcome to liblfds, a portable, license-free, lock-free data structure
+library written in C.
+
+platforms
+=========
+Currently liblfds out-of-the-box supports;
+
+Operating System CPU        Toolset
+================ ========== =======
+Windows 64-bit   IA64 & x64 1. Microsoft Visual Studio 2008
+                            2. Microsoft Windows SDK and GNUmake >= 3.81
+
+Windows 32-bit   x64 & x86  1. Microsoft Visual Studio 2008
+                            2. Visual C++ 2008 Express Edition
+                            3. Microsoft Windows SDK and GNUmake >= 3.81
+
+Windows Kernel   IA64, x64, 1. Windows Driver Kit >= 7.0.0
+                 x86
+
+Linux 64-bit     x64        1. GCC >= 4.1.0 and GNUmake >= 3.81
+
+Linux 32-bit     x64, x86,  1. GCC >= 4.1.0 and GNUmake >= 3.81
+                 ARM
+
+data structures
+===============
+Currently liblfds provides the following;
+
+* Freelist
+* Queue
+* Ringbuffer
+* Singly linked list (logical delete only)
+* Stack
+
+liblfds on-line
+===============
+On the liblfds home page, you will find the blog, a bugzilla, a forum, a
+wiki and the current and all historical source releases.
+
+The wiki contains comprehensive documentation for development,
+building, testing and porting.
+
+http://www.liblfds.org
+
+license
+=======
+There is no license. You are free to use this code in any way.
+
+building
+========
+On Windows, depending on your target platform, one of the following toolchains
+is required;
+
+ * Microsoft Visual Studio 2008 (expensive)
+ * Visual C++ 2008 Express Edition (free, but no 64 bit support)
+ * Microsoft Windows SDK (free, no GUI, has 64 bit support) and GNUmake 3.81
+
+On Windows (kernel-mode), the following toolchain is required;
+
+ * Windows Driver Kit 7.0.0 or later
+
+On Linux, the following toolchain is required;
+
+ * gcc 4.1.0 or later and GNUmake 3.81
+
+For documentation, see the building guide on the wiki.
+
+using
+=====
+Once built, there is a single header file, /inc/liblfds600.h, which you must
+include in your source code, and a single library file, /bin/liblfds600.*,
+where the suffix depends on your platform and your build choice (static or
+dynamic). If you build the static library, link against it directly; if you
+build the dynamic library, arrange your system so that the loader can find
+the library at run-time.
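+
+As a minimal, illustrative sketch (this program is not part of the library
+itself and error handling is omitted), user-mode code using the stack might
+look like this;
+
+  #include "liblfds600.h"
+
+  int main( void )
+  {
+    struct lfds600_stack_state
+      *ss;
+
+    void
+      *user_data;
+
+    // TRD : create a stack which can hold up to 10 elements
+    lfds600_stack_new( &ss, 10 );
+
+    // TRD : store a small integer directly in the void pointer, then pop it back
+    lfds600_stack_push( ss, (void *) (lfds600_atom_t) 5 );
+    lfds600_stack_pop( ss, &user_data );
+
+    lfds600_stack_delete( ss, NULL, NULL );
+
+    return( 0 );
+  }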
+
+testing
+=======
+The library comes with a command line test and benchmark program. This
+program requires threads, so it is only suitable for platforms which provide
+thread support and which can execute a command line binary. Currently this
+means the test and benchmark program works on all platforms except the
+Windows kernel.
+
+For documentation, see the testing and benchmarking guide on the wiki.
+
+porting
+=======
+Both the test program and liblfds provide an abstraction layer which acts to
+mask platform differences. Porting is the act of implementing on your platform
+the functions which make up the abstraction layers. You do not need to port
+the test program to port liblfds, but obviously it is recommended, so you can
+test your port.
+
+To support liblfds, your platform must support one of the following: contiguous
+double-word compare-and-swap (e.g. x86/x64), contiguous double-word
+load-link/store-conditional where normal loads cannot occur inside the LL/SC
+pair (e.g. ARM), or single-word load-link/store-conditional where normal loads
+can occur inside the LL/SC pair.
+
+For documentation, see the porting guide on the wiki.
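+
+Purely for illustration, the sketch below shows the shape of a port of two of
+the abstraction layer functions, here using the GCC atomic intrinsics (the
+library already ships an implementation along these lines for GCC >= 4.1.0);
+a real port implements every function prototyped in the abstraction section
+of liblfds600.h;
+
+  #include "liblfds600.h"
+
+  /* TRD : illustrative sketch only - not a new platform port */
+
+  lfds600_atom_t lfds600_abstraction_cas( volatile lfds600_atom_t *destination, lfds600_atom_t exchange, lfds600_atom_t compare )
+  {
+    // TRD : note the GCC intrinsic takes compare before exchange
+    return( (lfds600_atom_t) __sync_val_compare_and_swap(destination, compare, exchange) );
+  }
+
+  lfds600_atom_t lfds600_abstraction_increment( lfds600_atom_t *value )
+  {
+    // TRD : atomically add one and return the new value
+    return( (lfds600_atom_t) __sync_add_and_fetch(value, 1) );
+  }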
+
+release history
+===============
+release 1, 25th September 2009, svn revision 1574.
+ - initial release
+
+release 2, 5th October 2009, svn revision 1599.
+ - added abstraction layer for Windows kernel
+ - minor code tidyups/fixes
+
+release 3, 25th October 2009, svn revision 1652.
+ - added singly linked list (logical delete only)
+ - minor code tidyups/fixes
+
+release 4, 7th December 2009, svn revision 1716.
+ - added ARM support
+ - added benchmarking functionality to the test program
+ - fixed a profound and pervasive pointer
+       declaration bug; earlier releases of liblfds
+ *should not be used*
+
+release 5, 19th December 2009, svn revision 1738.
+ - fixed subtle queue bug, which also affected ringbuffer
+ and caused data re-ordering under high load
+ - added benchmarks for freelist, ringbuffer and stack
+
+release 6, 29th December 2009, svn revision 1746.
+ - fixed two implementation errors, which reduced performance,
+ spotted by Codeplug from "http://cboard.cprogramming.com".
+
+release 6.0.0, 18th December 2012, svn revision 2537
+ - introduction of namespaces, e.g. the "lfds600_" prefix
+ code otherwise COMPLETE AND WHOLLY UNCHANGED
+ this release is a stepping-stone to 6.1.0
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!)
+
+To deal with these limitations, it is necessary for a Windows kernel
+build to run a batch file prior to building.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+They are both idempotent; you can run them as often as you like and
+switch between them as often as you want. It's all fine; whenever
+you run one of them, it will take you from whatever state you were
+previously in, into the state you want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file will then copy "/sources.static" into
+"/src/single_dir_for_windows_kernel/", which will cause a static
+library to be built.
+
+The dynamic library batch file will then copy "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic
+library to be built. It will also copy "src/driver_entry.c" into
+"/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even
+though it's not used.
+
+
--- /dev/null
+@echo off
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y sources.dynamic src\single_dir_for_windows_kernel\sources 1>nul 2>nul
+copy /y src\driver_entry.c src\single_dir_for_windows_kernel 1>nul 2>nul
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds600_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y sources.static src\single_dir_for_windows_kernel\sources 1>nul 2>nul
+erase /f src\single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4
+DLLDEF = ../../liblfds600.def
+TARGETNAME = liblfds600
+TARGETPATH = ../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /DWIN_KERNEL_BUILD
+
+INCLUDES = ..;../../inc/
+SOURCES = lfds600_abstraction_aligned_free.c \
+ lfds600_abstraction_aligned_malloc.c \
+ lfds600_abstraction_cas.c \
+ lfds600_abstraction_dcas.c \
+ lfds600_abstraction_increment.c \
+ lfds600_freelist_delete.c \
+ lfds600_freelist_get_and_set.c \
+ lfds600_freelist_new.c \
+ lfds600_freelist_pop_push.c \
+ lfds600_freelist_query.c \
+ lfds600_queue_delete.c \
+ lfds600_queue_new.c \
+ lfds600_queue_query.c \
+ lfds600_queue_queue.c \
+ lfds600_ringbuffer_delete.c \
+ lfds600_ringbuffer_get_and_put.c \
+ lfds600_ringbuffer_new.c \
+ lfds600_ringbuffer_query.c \
+ lfds600_slist_delete.c \
+ lfds600_slist_get_and_set.c \
+ lfds600_slist_link.c \
+ lfds600_slist_new.c \
+ lfds600_stack_delete.c \
+ lfds600_stack_new.c \
+ lfds600_stack_push_pop.c \
+ lfds600_stack_query.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4
+TARGETNAME = liblfds600
+TARGETPATH = ../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /DWIN_KERNEL_BUILD
+
+INCLUDES = ..;../../inc/
+SOURCES = lfds600_abstraction_aligned_free.c \
+ lfds600_abstraction_aligned_malloc.c \
+ lfds600_abstraction_cas.c \
+ lfds600_abstraction_dcas.c \
+ lfds600_abstraction_increment.c \
+ lfds600_freelist_delete.c \
+ lfds600_freelist_get_and_set.c \
+ lfds600_freelist_new.c \
+ lfds600_freelist_pop_push.c \
+ lfds600_freelist_query.c \
+ lfds600_queue_delete.c \
+ lfds600_queue_new.c \
+ lfds600_queue_query.c \
+ lfds600_queue_queue.c \
+ lfds600_ringbuffer_delete.c \
+ lfds600_ringbuffer_get_and_put.c \
+ lfds600_ringbuffer_new.c \
+ lfds600_ringbuffer_query.c \
+ lfds600_slist_delete.c \
+ lfds600_slist_get_and_set.c \
+ lfds600_slist_link.c \
+ lfds600_slist_new.c \
+ lfds600_stack_delete.c \
+ lfds600_stack_new.c \
+ lfds600_stack_push_pop.c \
+ lfds600_stack_query.c
+
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "lfds600_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return( STATUS_SUCCESS );
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+This C file (driver_entry.c) is used when building a dynamic library for
+the Windows kernel. It exists to work around one of the limitations of
+that build environment. It is not used by any other build; just ignore it.
+
--- /dev/null
+#include "lfds600_abstraction_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ void lfds600_abstraction_aligned_free( void *memory )
+ {
+ _aligned_free( memory );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (_XOPEN_SOURCE >= 600)
+
+ /* TRD : any OS on any CPU with any compiler with POSIX 6.00 or better
+
+ _XOPEN_SOURCE is actually set by the user, not by the compiler
+ it is the way the user signals to the compiler what
+ level of POSIX should be available
+ (it assumes of course the compiler has support for the given level of POSIX requested)
+ */
+
+ void lfds600_abstraction_aligned_free( void *memory )
+ {
+ free( memory );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ void lfds600_abstraction_aligned_free( void *memory )
+ {
+ ExFreePoolWithTag( memory, 'sdfl' );
+
+ return;
+ }
+
+#endif
+
--- /dev/null
+#include "lfds600_abstraction_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ void *lfds600_abstraction_aligned_malloc( size_t size, size_t align_in_bytes )
+ {
+ void
+ *rv;
+
+ rv = _aligned_malloc( size, align_in_bytes );
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (_XOPEN_SOURCE >= 600)
+
+ /* TRD : any OS on any CPU with any compiler with POSIX 6.00 or better
+
+ _XOPEN_SOURCE is actually set by the user, not by the compiler
+ it is the way the user signals to the compiler what
+ level of POSIX should be available
+ (it assumes of course the compiler has support for the given level of POSIX requested)
+ */
+
+ void *lfds600_abstraction_aligned_malloc( size_t size, size_t align_in_bytes )
+ {
+ int
+ rv;
+
+ void
+ *memory;
+
+ rv = posix_memalign( &memory, align_in_bytes, size );
+
+ // TRD : posix_memalign returns 0 on success, docs do not say *memory == NULL on fail
+ if( rv != 0 )
+ memory = NULL;
+
+ return( memory );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ void *lfds600_abstraction_aligned_malloc( size_t size, size_t align_in_bytes )
+ {
+ void
+ *rv;
+
+ /* TRD : ExAllocatePoolWithTag() allocates memory aligned on 8 bytes on 32-bit CPUs
+ and on 16 bytes on 64-bit CPUs, which is what we want
+
+ as such, align_in_bytes is not needed; we must refer to it to avoid the
+ compiler warning
+ */
+
+ align_in_bytes;
+
+ rv = ExAllocatePoolWithTag( NonPagedPool, size, 'sdfl' );
+
+ return( rv );
+ }
+
+#endif
+
--- /dev/null
+#include "lfds600_abstraction_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 64 bit and 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_cas( volatile lfds600_atom_t *destination, lfds600_atom_t exchange, lfds600_atom_t compare )
+ {
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+ return( (lfds600_atom_t) _InterlockedCompareExchangePointer((void * volatile *) destination, (void *) exchange, (void *) compare) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined __arm__ && defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on any CPU except ARM with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_cas( volatile lfds600_atom_t *destination, lfds600_atom_t exchange, lfds600_atom_t compare )
+ {
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+    // TRD : note the different argument order for the GCC intrinsic to the MSVC intrinsic
+
+ return( (lfds600_atom_t) __sync_val_compare_and_swap(destination, compare, exchange) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __arm__ && __GNUC__)
+
+ /* TRD : any OS on any ARM with GCC
+
+           note that we return the original value of *destination, so the caller can determine whether the CAS succeeded
+
+ __arm__ indicates ARM
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_cas( volatile lfds600_atom_t *destination, lfds600_atom_t exchange, lfds600_atom_t compare )
+ {
+ lfds600_atom_t
+ stored_flag,
+ original_destination;
+
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+ /* TRD : this is a standard, plain CAS, vulnerable to ABA */
+
+ __asm__ __volatile__
+ (
+ " mov %[stored_flag], #1;" // put 1 into stored_flag
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)
+ "atomic_cas:;"
+ " ldrex %[original_destination], [%[destination]];" // load *destination into original_destination
+ " teq %[original_destination], %[compare];" // compare original_destination with compare
+ " bne exit;" // if not equal, exit
+ " strex %[stored_flag], %[exchange], [%[destination]];" // if equal, try to store exchange into *destination (on success, strex puts 0 into stored_flag)
+ " teq %[stored_flag], #0;" // check if stored_flag is 0
+ " bne atomic_cas;" // if not 0, retry (someone else touched *destination after we loaded but before we stored)
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)
+ "exit:;"
+
+ // output
+ : "+m" (*destination), [original_destination] "=&r" (original_destination), [stored_flag] "=&r" (stored_flag)
+
+ // input
+ : [destination] "r" (destination), [compare] "r" (compare), [exchange] "r" (exchange), [zero] "r" (0)
+
+ // clobbered
+ : "cc", "memory" // memory is clobbered because we issue a memory barrier
+ );
+
+ return( original_destination );
+ }
+
+#endif
+
--- /dev/null
+#include "lfds600_abstraction_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN64 && defined _MSC_VER)
+
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN64 indicates 64 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ cas_result = _InterlockedCompareExchange128( (volatile __int64 *) destination, (__int64) *(exchange+1), (__int64) *exchange, (__int64 *) compare );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ __int64
+ original_compare;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ *(__int64 *) &original_compare = *(__int64 *) compare;
+
+ *(__int64 *) compare = _InterlockedCompareExchange64( (volatile __int64 *) destination, *(__int64 *) exchange, *(__int64 *) compare );
+
+ return( (unsigned char) (*(__int64 *) compare == *(__int64 *) &original_compare) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __x86_64__ && __GNUC__ && !defined __pic__)
+
+ /* TRD : any OS on x64 with GCC for statically linked code
+
+ __x86_64__ indicates x64
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ __asm__ __volatile__
+ (
+ "lock;" // make cmpxchg16b atomic
+ "cmpxchg16b %0;" // cmpxchg16b sets ZF on success
+ "setz %3;" // if ZF set, set cas_result to 1
+
+ // output
+ : "+m" (*(volatile lfds600_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)
+
+ // input
+ : "b" (*exchange), "c" (*(exchange+1))
+
+ // clobbered
+ : "cc", "memory"
+ );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+
+/****************************************************************************/
+#if (defined __i686__ && __GNUC__ && !defined __pic__)
+
+ /* TRD : any OS on x86 with GCC for statically linked code
+
+ __i686__ indicates x86
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ __asm__ __volatile__
+ (
+ "lock;" // make cmpxchg8b atomic
+ "cmpxchg8b %0;" // cmpxchg8b sets ZF on success
+ "setz %3;" // if ZF set, set cas_result to 1
+
+ // output
+ : "+m" (*(volatile lfds600_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)
+
+ // input
+ : "b" (*exchange), "c" (*(exchange+1))
+
+ // clobbered
+ : "cc", "memory"
+ );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __x86_64__ && __GNUC__ && defined __pic__)
+
+ /* TRD : any OS on x64 with GCC for position independent code (e.g. a shared object)
+
+ __x86_64__ indicates x64
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ /* TRD : with a shared object, we cannot clobber RBX
+ as such, we borrow RSI - we load half of the exchange value into it
+ then swap it with RBX
+ then do the compare-and-swap
+ then swap the original value of RBX back from RSI
+ */
+
+ __asm__ __volatile__
+ (
+ "xchg %%rsi, %%rbx;" // swap RBI and RBX
+ "lock;" // make cmpxchg16b atomic
+ "cmpxchg16b %0;" // cmpxchg16b sets ZF on success
+ "setz %3;" // if ZF set, set cas_result to 1
+ "xchg %%rbx, %%rsi;" // re-swap RBI and RBX
+
+ // output
+ : "+m" (*(volatile lfds600_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)
+
+ // input
+ : "S" (*exchange), "c" (*(exchange+1))
+
+ // clobbered
+ : "cc", "memory"
+ );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+
+/****************************************************************************/
+#if (defined __i686__ && __GNUC__ && defined __pic__)
+
+ /* TRD : any OS on x86 with GCC for position independent code (e.g. a shared object)
+
+ __i686__ indicates x86
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ /* TRD : with a shared object, we cannot clobber EBX
+ as such, we borrow ESI - we load half of the exchange value into it
+ then swap it with EBX
+ then do the compare-and-swap
+ then swap the original value of EBX back from ESI
+ */
+
+ __asm__ __volatile__
+ (
+ "xchg %%esi, %%ebx;" // swap EBI and EBX
+ "lock;" // make cmpxchg8b atomic
+ "cmpxchg8b %0;" // cmpxchg8b sets ZF on success
+ "setz %3;" // if ZF set, set cas_result to 1
+ "xchg %%ebx, %%esi;" // re-swap EBI and EBX
+
+ // output
+ : "+m" (*(volatile lfds600_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)
+
+ // input
+ : "S" (*exchange), "c" (*(exchange+1))
+
+ // clobbered
+ : "cc", "memory"
+ );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __arm__ && __GNUC__)
+
+ /* TRD : any OS on any ARM with GCC
+
+ Remember however we need to set into compare the original value of destination.
+
+ __arm__ indicates ARM
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE unsigned char lfds600_abstraction_dcas( volatile lfds600_atom_t *destination, lfds600_atom_t *exchange, lfds600_atom_t *compare )
+ {
+ lfds600_atom_t
+ *local_compare = compare,
+ stored_flag = 1;
+
+ register lfds600_atom_t
+ local_exchange_a __asm("r2"),
+ local_exchange_b __asm("r3"),
+ local_compare_a __asm("r4"),
+ local_compare_b __asm("r5"),
+ original_destination_a __asm("r6"),
+ original_destination_b __asm("r7");
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ /* TRD : some notes
+
+ the double word ldr and str instructions require contiguous registers,
+ where the first register is an even number
+
+ honouring this requirement requires us to specifically specify
+ the registers to use (which is why we're using register __asm("rN")
+ in the declarations above)
+
+ the arguments to the function occupy registers r0, r1 and r2
+
+ we can use up to and including r8, but r9 can have a frame pointer in it
+
+ so we make a copy of compare (freeing up r2, so we can use it for a double
+ word load) but use destination (r0) and exchange (r1) directly
+
+ note LDRD and STRD became available in armv6k
+
+ apologies for the trickery with the mcr register variable - the code runs
+ out of registers on armv6k
+ */
+
+ __asm__ __volatile__
+ (
+ " mov %[stored_flag], #1;" // put 1 into stored_flag
+ " mov %[local_exchange_a], #0;" // borrow local_exchange_a for mcr, to save a register
+ " mcr p15, 0, %[local_exchange_a], c7, c10, 5;" // memory barrier (ARM v6 compatible)
+ " ldrd %[local_exchange_a], %[local_exchange_b], [%[exchange]];" // load exchange into local_exchange_a and local_exchange_b (which are r2 and r3, respectively)
+ " ldrd %[local_compare_a], %[local_compare_b], [%[local_compare]];" // load compare into local_compare_a and local_compare_b (which are r4 and r5, respectively)
+ "atomic_dcas:;"
+ " ldrexd %[original_destination_a], %[original_destination_b], [%[destination]];" // load destination into original_destination_a and original_destination_b (which are r6 and r7, respectively)
+ " teq %[original_destination_a], %[local_compare_a];" // compare the first word of destination with the first word of compare
+ " teqeq %[original_destination_b], %[local_compare_b];" // if they're equal, compare the second word of destination with the second word of compare
+ " bne exit;" // if either word of destination does not match its respective word of compare, exit
+ " strexd %[stored_flag], %[local_exchange_a], %[local_exchange_b], [%[destination]];" // if both words were equal, try to store local_exchange_a and local_exchange_b into *destination (on success, strexed puts 0 into stored_flag)
+ " teq %[stored_flag], #0;" // check if stored_flag is 0
+ " bne atomic_dcas;" // if not 0, retry (someone else touched *destination after we loaded but before we stored)
+ "exit:;"
+ " strd %[original_destination_a], %[original_destination_b], [%[local_compare]];" // whether or not the CAS swapped, we always write the original value of destination into *compare
+ " mov %[local_exchange_a], #0;" // borrow local_exchange_a for mcr, to save a register
+ " mcr p15, 0, %[local_exchange_a], c7, c10, 5;" // memory barrier (ARM v6 compatible)
+
+ // output
+ : "+m" (*(volatile lfds600_atom_t (*)[2]) destination), "+m" (*(lfds600_atom_t (*)[2]) local_compare),
+ [stored_flag] "+&r" (stored_flag),
+ [original_destination_a] "+&r" (original_destination_a), [original_destination_b] "+&r" (original_destination_b),
+ [local_compare_a] "+&r" (local_compare_a), [local_compare_b] "+&r" (local_compare_b),
+ [local_exchange_a] "+&r" (local_exchange_a), [local_exchange_b] "+&r" (local_exchange_b)
+
+ // input
+ : "m" (*(lfds600_atom_t (*)[2]) exchange),
+ [destination] "r" (destination),
+ [local_compare] "r" (local_compare),
+ [exchange] "r" (exchange)
+
+ // clobbered
+ : "cc", "memory" // memory is clobbered because we issue a memory barrier
+ );
+
+ /* TRD : stored_flag is set to 0 on store, 1 on fail
+ we need to return 1 on success, 0 on fail
+ */
+
+ return( (unsigned char) !stored_flag );
+ }
+
+#endif
+
+
--- /dev/null
+#include "lfds600_abstraction_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN64 && defined _MSC_VER)
+
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN64 indicates 64 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_increment( lfds600_atom_t *value )
+ {
+ __int64
+ rv;
+
+ assert( value != NULL );
+
+ rv = _InterlockedIncrement64( (__int64 *) value );
+
+ return( (lfds600_atom_t) rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_increment( lfds600_atom_t *value )
+ {
+ long int
+ rv;
+
+ assert( value != NULL );
+
+ rv = _InterlockedIncrement( (long int *) value );
+
+ return( (lfds600_atom_t) rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined __arm__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on any CPU except ARM with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_increment( lfds600_atom_t *value )
+ {
+ lfds600_atom_t
+ rv;
+
+ assert( value != NULL );
+
+ // TRD : no need for casting here, GCC has a __sync_add_and_fetch() for all native types
+
+ rv = __sync_add_and_fetch( value, 1 );
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __arm__ && __GNUC__ >= 4)
+
+ /* TRD : any OS on any ARM with GCC 4 or better
+
+ uses the LDREX/STREX instructions, so requires ARMv6 or later
+
+ __arm__ indicates ARM
+ __GNUC__ indicates GCC
+ */
+
+ LFDS600_INLINE lfds600_atom_t lfds600_abstraction_increment( lfds600_atom_t *value )
+ {
+ lfds600_atom_t
+ stored_flag = 0,
+ new_value = 0;
+
+ assert( value != NULL );
+
+ __asm__ __volatile__
+ (
+ " mov %[stored_flag], #1;" // move 1 into stored_flag
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)
+ "atomic_add:;"
+ " ldrex %[new_value], [%[value]]; " // load *value into new_value
+ " add %[new_value], #1;" // add 1 to new_value
+ " strex %[stored_flag], %[new_value], [%[value]];" // try to store new_value into *value (on success, strex puts 0 into stored_flag)
+ " teq %[stored_flag], #0;" // check if stored_flag is 0
+ " bne atomic_add;" // if not 0, retry (someone else touched *value after we loaded but before we stored)
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)
+
+ // output
+ : "+m" (*value), [new_value] "+&r" (new_value), [stored_flag] "+&r" (stored_flag)
+
+ // input
+ : [value] "r" (value), [zero] "r" (0)
+
+ // clobbered
+ : "cc", "memory" // memory is clobbered because we issue a memory barrier
+ );
+
+ return( new_value );
+ }
+
+#endif
+
--- /dev/null
+/***** the library wide include file *****/
+#include "lfds600_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+#include "lfds600_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_freelist_delete( struct lfds600_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ struct lfds600_freelist_element
+ *fe;
+
+ void
+ *user_data;
+
+ assert( fs != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ while( lfds600_freelist_pop(fs, &fe) )
+ {
+ if( user_data_delete_function != NULL )
+ {
+ lfds600_freelist_get_user_data_from_element( fe, &user_data );
+ user_data_delete_function( user_data, user_state );
+ }
+
+ lfds600_abstraction_aligned_free( fe );
+ }
+
+ lfds600_abstraction_aligned_free( fs );
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void *lfds600_freelist_get_user_data_from_element( struct lfds600_freelist_element *fe, void **user_data )
+{
+ assert( fe != NULL );
+ // TRD : user_data can be NULL
+
+ if( user_data != NULL )
+ *user_data = fe->user_data;
+
+ return( fe->user_data );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_freelist_set_user_data_in_element( struct lfds600_freelist_element *fe, void *user_data )
+{
+ assert( fe != NULL );
+ // TRD : user_data can be NULL
+
+ fe->user_data = user_data;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "lfds600_internal.h"
+
+/***** defines *****/
+#define LFDS600_FREELIST_POINTER 0
+#define LFDS600_FREELIST_COUNTER 1
+#define LFDS600_FREELIST_PAC_SIZE 2
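+
+/* the top-of-stack is held as a {pointer, ABA counter} pair, indexed by the
+ POINTER and COUNTER defines above; the counter is taken from aba_counter,
+ which is incremented on every push, and the pair is always read and swapped
+ as a unit by lfds600_abstraction_dcas() - see lfds600_freelist_push() and
+ lfds600_freelist_pop()
+*/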
+
+/***** structures *****/
+#pragma pack( push, LFDS600_ALIGN_DOUBLE_POINTER )
+
+struct lfds600_freelist_state
+{
+ struct lfds600_freelist_element
+ *volatile top[LFDS600_FREELIST_PAC_SIZE];
+
+ int
+ (*user_data_init_function)( void **user_data, void *user_state );
+
+ void
+ *user_state;
+
+ lfds600_atom_t
+ aba_counter,
+ element_count;
+};
+
+struct lfds600_freelist_element
+{
+ struct lfds600_freelist_element
+ *next[LFDS600_FREELIST_PAC_SIZE];
+
+ void
+ *user_data;
+};
+
+#pragma pack( pop )
+
+/***** private prototypes *****/
+lfds600_atom_t lfds600_freelist_internal_new_element( struct lfds600_freelist_state *fs, struct lfds600_freelist_element **fe );
+void lfds600_freelist_internal_validate( struct lfds600_freelist_state *fs, struct lfds600_validation_info *vi, enum data_structure_validity *lfds600_freelist_validity );
+
--- /dev/null
+#include "lfds600_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_freelist_new( struct lfds600_freelist_state **fs, lfds600_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )
+{
+ int
+ rv = 0;
+
+ lfds600_atom_t
+ element_count;
+
+ assert( fs != NULL );
+ // TRD : number_elements can be any value in its range
+ // TRD : user_data_init_function can be NULL
+
+ *fs = (struct lfds600_freelist_state *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_freelist_state), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( (*fs) != NULL )
+ {
+ (*fs)->top[LFDS600_FREELIST_POINTER] = NULL;
+ (*fs)->top[LFDS600_FREELIST_COUNTER] = 0;
+ (*fs)->user_data_init_function = user_data_init_function;
+ (*fs)->user_state = user_state;
+ (*fs)->aba_counter = 0;
+ (*fs)->element_count = 0;
+
+ element_count = lfds600_freelist_new_elements( *fs, number_elements );
+
+ if( element_count == number_elements )
+ rv = 1;
+
+ if( element_count != number_elements )
+ {
+ lfds600_abstraction_aligned_free( (*fs) );
+ *fs = NULL;
+ }
+ }
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+lfds600_atom_t lfds600_freelist_new_elements( struct lfds600_freelist_state *fs, lfds600_atom_t number_elements )
+{
+ struct lfds600_freelist_element
+ *fe;
+
+ lfds600_atom_t
+ loop,
+ count = 0;
+
+ assert( fs != NULL );
+ // TRD : number_elements can be any value in its range
+ // TRD : user_data_init_function can be NULL
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( lfds600_freelist_internal_new_element(fs, &fe) )
+ {
+ lfds600_freelist_push( fs, fe );
+ count++;
+ }
+
+ return( count );
+}
+
+
+
+
+
+/****************************************************************************/
+lfds600_atom_t lfds600_freelist_internal_new_element( struct lfds600_freelist_state *fs, struct lfds600_freelist_element **fe )
+{
+ lfds600_atom_t
+ rv = 0;
+
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ /* TRD : basically, does what you'd expect;
+
+ allocates an element
+ calls the user init function
+ if anything fails, cleans up,
+ sets *fe to NULL
+ and returns 0
+ */
+
+ *fe = (struct lfds600_freelist_element *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_freelist_element), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( *fe != NULL )
+ {
+ if( fs->user_data_init_function == NULL )
+ {
+ (*fe)->user_data = NULL;
+ rv = 1;
+ }
+
+ if( fs->user_data_init_function != NULL )
+ {
+ rv = fs->user_data_init_function( &(*fe)->user_data, fs->user_state );
+
+ if( rv == 0 )
+ {
+ lfds600_abstraction_aligned_free( *fe );
+ *fe = NULL;
+ }
+ }
+ }
+
+ if( rv == 1 )
+ lfds600_abstraction_increment( (lfds600_atom_t *) &fs->element_count );
+
+ return( rv );
+}
+
--- /dev/null
+#include "lfds600_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_freelist_element *lfds600_freelist_pop( struct lfds600_freelist_state *fs, struct lfds600_freelist_element **fe )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_freelist_element
+ *fe_local[LFDS600_FREELIST_PAC_SIZE];
+
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ fe_local[LFDS600_FREELIST_COUNTER] = fs->top[LFDS600_FREELIST_COUNTER];
+ fe_local[LFDS600_FREELIST_POINTER] = fs->top[LFDS600_FREELIST_POINTER];
+
+ /* TRD : note that lfds600_abstraction_dcas loads the original value of the destination (fs->top) into the compare (fe_local)
+ (this happens of course after the CAS itself has occurred inside lfds600_abstraction_dcas)
+ */
+
+ do
+ {
+ if( fe_local[LFDS600_FREELIST_POINTER] == NULL )
+ {
+ *fe = NULL;
+ return( *fe );
+ }
+ }
+ while( 0 == lfds600_abstraction_dcas((volatile lfds600_atom_t *) fs->top, (lfds600_atom_t *) fe_local[LFDS600_FREELIST_POINTER]->next, (lfds600_atom_t *) fe_local) );
+
+ *fe = (struct lfds600_freelist_element *) fe_local[LFDS600_FREELIST_POINTER];
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_freelist_element *lfds600_freelist_guaranteed_pop( struct lfds600_freelist_state *fs, struct lfds600_freelist_element **fe )
+{
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ lfds600_freelist_internal_new_element( fs, fe );
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_freelist_push( struct lfds600_freelist_state *fs, struct lfds600_freelist_element *fe )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_freelist_element
+ *fe_local[LFDS600_FREELIST_PAC_SIZE],
+ *original_fe_next[LFDS600_FREELIST_PAC_SIZE];
+
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ fe_local[LFDS600_FREELIST_POINTER] = fe;
+ fe_local[LFDS600_FREELIST_COUNTER] = (struct lfds600_freelist_element *) lfds600_abstraction_increment( (lfds600_atom_t *) &fs->aba_counter );
+
+ original_fe_next[LFDS600_FREELIST_POINTER] = fs->top[LFDS600_FREELIST_POINTER];
+ original_fe_next[LFDS600_FREELIST_COUNTER] = fs->top[LFDS600_FREELIST_COUNTER];
+
+ /* TRD : note that lfds600_abstraction_dcas loads the original value of the destination (fs->top) into the compare (original_fe_next)
+ (this happens of course after the CAS itself has occurred inside lfds600_abstraction_dcas)
+ this then causes us in our loop, should we repeat it, to update fe_local->next to a more
+ up-to-date version of the head of the lfds600_freelist
+ */
+
+ do
+ {
+ fe_local[LFDS600_FREELIST_POINTER]->next[LFDS600_FREELIST_POINTER] = original_fe_next[LFDS600_FREELIST_POINTER];
+ fe_local[LFDS600_FREELIST_POINTER]->next[LFDS600_FREELIST_COUNTER] = original_fe_next[LFDS600_FREELIST_COUNTER];
+ }
+ while( 0 == lfds600_abstraction_dcas((volatile lfds600_atom_t *) fs->top, (lfds600_atom_t *) fe_local, (lfds600_atom_t *) original_fe_next) );
+
+ return;
+}
+
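+
+/* a minimal usage sketch for the freelist, kept in a comment so it does not
+ affect the library build; it assumes liblfds600.h is on the include path and
+ that no per-element user data is needed (so the init and delete callbacks
+ are NULL)
+
+ #include "liblfds600.h"
+
+ int main( void )
+ {
+ struct lfds600_freelist_state *fs;
+ struct lfds600_freelist_element *fe;
+ void *user_data;
+
+ if( lfds600_freelist_new(&fs, 10, NULL, NULL) )
+ {
+ // pop an element, read its user data (NULL here, as no init function was given), push it back
+ if( lfds600_freelist_pop(fs, &fe) != NULL )
+ {
+ lfds600_freelist_get_user_data_from_element( fe, &user_data );
+ lfds600_freelist_push( fs, fe );
+ }
+
+ lfds600_freelist_delete( fs, NULL, NULL );
+ }
+
+ return( 0 );
+ }
+*/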
--- /dev/null
+#include "lfds600_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_freelist_query( struct lfds600_freelist_state *fs, enum lfds600_freelist_query_type query_type, void *query_input, void *query_output )
+{
+ assert( fs != NULL );
+ // TRD : query type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+ assert( query_output != NULL );
+
+ switch( query_type )
+ {
+ case LFDS600_FREELIST_QUERY_ELEMENT_COUNT:
+ assert( query_input == NULL );
+
+ *(lfds600_atom_t *) query_output = fs->element_count;
+ break;
+
+ case LFDS600_FREELIST_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ lfds600_freelist_internal_validate( fs, (struct lfds600_validation_info *) query_input, (enum data_structure_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_freelist_internal_validate( struct lfds600_freelist_state *fs, struct lfds600_validation_info *vi, enum data_structure_validity *lfds600_freelist_validity )
+{
+ struct lfds600_freelist_element
+ *fe,
+ *fe_slow,
+ *fe_fast;
+
+ lfds600_atom_t
+ element_count = 0;
+
+ assert( fs != NULL );
+ // TRD : vi can be NULL
+ assert( lfds600_freelist_validity != NULL );
+
+ *lfds600_freelist_validity = VALIDITY_VALID;
+
+ fe_slow = fe_fast = (struct lfds600_freelist_element *) fs->top[LFDS600_FREELIST_POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the lfds600_freelist
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the lfds600_freelist)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( fe_slow != NULL )
+ do
+ {
+ fe_slow = fe_slow->next[LFDS600_FREELIST_POINTER];
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next[LFDS600_FREELIST_POINTER];
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next[LFDS600_FREELIST_POINTER];
+ }
+ while( fe_slow != NULL and fe_fast != fe_slow );
+
+ if( fe_fast != NULL and fe_slow != NULL and fe_fast == fe_slow )
+ *lfds600_freelist_validity = VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds600_freelist_validity == VALIDITY_VALID and vi != NULL )
+ {
+ fe = (struct lfds600_freelist_element *) fs->top[LFDS600_FREELIST_POINTER];
+
+ while( fe != NULL )
+ {
+ element_count++;
+ fe = (struct lfds600_freelist_element *) fe->next[LFDS600_FREELIST_POINTER];
+ }
+
+ if( element_count < vi->min_elements )
+ *lfds600_freelist_validity = VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element_count > vi->max_elements )
+ *lfds600_freelist_validity = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** public prototypes *****/
+#include "liblfds600.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
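+// these spellings mirror the C standard's iso646.h "and"/"or" macros; the
+// library's .c files use them throughout in place of && and ||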
+
+#define RAISED 1
+#define LOWERED 0
+
+#define NO_FLAGS 0x0
+
--- /dev/null
+#include "lfds600_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_queue_delete( struct lfds600_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ void
+ *user_data;
+
+ assert( qs != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ while( lfds600_queue_dequeue(qs, &user_data) )
+ if( user_data_delete_function != NULL )
+ user_data_delete_function( user_data, user_state );
+
+ /* TRD : fully dequeuing will leave us
+ with a single dummy element
+ which both qs->enqueue and qs->dequeue point at
+ we push this back onto the lfds600_freelist
+ before we delete the lfds600_freelist
+ */
+
+ lfds600_freelist_push( qs->fs, qs->enqueue[LFDS600_QUEUE_POINTER]->fe );
+
+ lfds600_freelist_delete( qs->fs, lfds600_queue_internal_freelist_delete_function, NULL );
+
+ lfds600_abstraction_aligned_free( qs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds600_queue_internal_freelist_delete_function( void *user_data, void *user_state )
+{
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ lfds600_abstraction_aligned_free( user_data );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "lfds600_internal.h"
+
+/***** pragmas *****/
+
+/***** defines *****/
+#define LFDS600_QUEUE_STATE_UNKNOWN -1
+#define LFDS600_QUEUE_STATE_EMPTY 0
+#define LFDS600_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE 1
+#define LFDS600_QUEUE_STATE_ATTEMPT_DEQUEUE 2
+
+#define LFDS600_QUEUE_POINTER 0
+#define LFDS600_QUEUE_COUNTER 1
+#define LFDS600_QUEUE_PAC_SIZE 2
+
+/***** structures *****/
+#pragma pack( push, LFDS600_ALIGN_DOUBLE_POINTER )
+
+struct lfds600_queue_state
+{
+ struct lfds600_queue_element
+ *volatile enqueue[LFDS600_QUEUE_PAC_SIZE],
+ *volatile dequeue[LFDS600_QUEUE_PAC_SIZE];
+
+ lfds600_atom_t
+ aba_counter;
+
+ struct lfds600_freelist_state
+ *fs;
+};
+
+struct lfds600_queue_element
+{
+ // TRD : next in a lfds600_queue requires volatile as it is target of CAS
+ struct lfds600_queue_element
+ *volatile next[LFDS600_QUEUE_PAC_SIZE];
+
+ struct lfds600_freelist_element
+ *fe;
+
+ void
+ *user_data;
+};
+
+#pragma pack( pop )
+
+/***** externs *****/
+
+/***** private prototypes *****/
+int lfds600_queue_internal_freelist_init_function( void **user_data, void *user_state );
+void lfds600_queue_internal_freelist_delete_function( void *user_data, void *user_state );
+
+void lfds600_queue_internal_new_element_from_freelist( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE], void *user_data );
+void lfds600_queue_internal_guaranteed_new_element_from_freelist( struct lfds600_queue_state *qs, struct lfds600_queue_element * qe[LFDS600_QUEUE_PAC_SIZE], void *user_data );
+void lfds600_queue_internal_init_element( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE], struct lfds600_freelist_element *fe, void *user_data );
+
+void lfds600_queue_internal_queue( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE] );
+
+void lfds600_queue_internal_validate( struct lfds600_queue_state *qs, struct lfds600_validation_info *vi, enum data_structure_validity *lfds600_queue_validity, enum data_structure_validity *lfds600_freelist_validity );
+
--- /dev/null
+#include "lfds600_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_queue_new( struct lfds600_queue_state **qs, lfds600_atom_t number_elements )
+{
+ int
+ rv = 0;
+
+ struct lfds600_queue_element
+ *qe[LFDS600_QUEUE_PAC_SIZE];
+
+ assert( qs != NULL );
+ // TRD : number_elements can be any value in its range
+
+ *qs = (struct lfds600_queue_state *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_queue_state), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( *qs != NULL )
+ {
+ // TRD : the size of the lfds600_freelist is the size of the lfds600_queue (+1 for the leading dummy element, which is hidden from the caller)
+ lfds600_freelist_new( &(*qs)->fs, number_elements+1, lfds600_queue_internal_freelist_init_function, NULL );
+
+ if( (*qs)->fs != NULL )
+ {
+ lfds600_queue_internal_new_element_from_freelist( *qs, qe, NULL );
+ (*qs)->enqueue[LFDS600_QUEUE_POINTER] = (*qs)->dequeue[LFDS600_QUEUE_POINTER] = qe[LFDS600_QUEUE_POINTER];
+ (*qs)->aba_counter = 0;
+ rv = 1;
+ }
+
+ if( (*qs)->fs == NULL )
+ {
+ lfds600_abstraction_aligned_free( *qs );
+ *qs = NULL;
+ }
+ }
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int lfds600_queue_internal_freelist_init_function( void **user_data, void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ *user_data = lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_queue_element), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( *user_data != NULL )
+ rv = 1;
+
+ return( rv );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds600_queue_internal_new_element_from_freelist( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE], void *user_data )
+{
+ struct lfds600_freelist_element
+ *fe;
+
+ assert( qs != NULL );
+ assert( qe != NULL );
+ // TRD : user_data can be any value in its range
+
+ qe[LFDS600_QUEUE_POINTER] = NULL;
+
+ lfds600_freelist_pop( qs->fs, &fe );
+
+ if( fe != NULL )
+ lfds600_queue_internal_init_element( qs, qe, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_queue_internal_guaranteed_new_element_from_freelist( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE], void *user_data )
+{
+ struct lfds600_freelist_element
+ *fe;
+
+ assert( qs != NULL );
+ assert( qe != NULL );
+ // TRD : user_data can be any value in its range
+
+ qe[LFDS600_QUEUE_POINTER] = NULL;
+
+ lfds600_freelist_guaranteed_pop( qs->fs, &fe );
+
+ if( fe != NULL )
+ lfds600_queue_internal_init_element( qs, qe, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_queue_internal_init_element( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE], struct lfds600_freelist_element *fe, void *user_data )
+{
+ assert( qs != NULL );
+ assert( qe != NULL );
+ assert( fe != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds600_freelist_get_user_data_from_element( fe, (void **) &qe[LFDS600_QUEUE_POINTER] );
+ qe[LFDS600_QUEUE_COUNTER] = (struct lfds600_queue_element *) lfds600_abstraction_increment( (lfds600_atom_t *) &qs->aba_counter );
+
+ qe[LFDS600_QUEUE_POINTER]->next[LFDS600_QUEUE_POINTER] = NULL;
+ qe[LFDS600_QUEUE_POINTER]->next[LFDS600_QUEUE_COUNTER] = (struct lfds600_queue_element *) lfds600_abstraction_increment( (lfds600_atom_t *) &qs->aba_counter );
+
+ qe[LFDS600_QUEUE_POINTER]->fe = fe;
+ qe[LFDS600_QUEUE_POINTER]->user_data = user_data;
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds600_queue_query( struct lfds600_queue_state *qs, enum lfds600_queue_query_type query_type, void *query_input, void *query_output )
+{
+ assert( qs != NULL );
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );
+
+ switch( query_type )
+ {
+ case LFDS600_QUEUE_QUERY_ELEMENT_COUNT:
+ assert( query_input == NULL );
+
+ lfds600_freelist_query( qs->fs, LFDS600_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );
+ break;
+
+ case LFDS600_QUEUE_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ lfds600_queue_internal_validate( qs, (struct lfds600_validation_info *) query_input, (enum data_structure_validity *) query_output, ((enum data_structure_validity *) query_output)+1 );
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds600_queue_internal_validate( struct lfds600_queue_state *qs, struct lfds600_validation_info *vi, enum data_structure_validity *lfds600_queue_validity, enum data_structure_validity *lfds600_freelist_validity )
+{
+ struct lfds600_queue_element
+ *qe,
+ *qe_slow,
+ *qe_fast;
+
+ lfds600_atom_t
+ element_count = 0,
+ total_elements;
+
+ struct lfds600_validation_info
+ lfds600_freelist_vi;
+
+ assert( qs != NULL );
+ // TRD : vi can be NULL
+ assert( lfds600_queue_validity != NULL );
+ assert( lfds600_freelist_validity != NULL );
+
+ *lfds600_queue_validity = VALIDITY_VALID;
+
+ qe_slow = qe_fast = (struct lfds600_queue_element *) qs->dequeue[LFDS600_QUEUE_POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the dequeue end of the lfds600_queue
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the lfds600_queue)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( qe_slow != NULL )
+ do
+ {
+ qe_slow = qe_slow->next[LFDS600_QUEUE_POINTER];
+
+ if( qe_fast != NULL )
+ qe_fast = qe_fast->next[LFDS600_QUEUE_POINTER];
+
+ if( qe_fast != NULL )
+ qe_fast = qe_fast->next[LFDS600_QUEUE_POINTER];
+ }
+ while( qe_slow != NULL and qe_fast != qe_slow );
+
+ if( qe_fast != NULL and qe_slow != NULL and qe_fast == qe_slow )
+ *lfds600_queue_validity = VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds600_queue_validity == VALIDITY_VALID and vi != NULL )
+ {
+ qe = (struct lfds600_queue_element *) qs->dequeue[LFDS600_QUEUE_POINTER];
+
+ while( qe != NULL )
+ {
+ element_count++;
+ qe = (struct lfds600_queue_element *) qe->next[LFDS600_QUEUE_POINTER];
+ }
+
+ /* TRD : remember there is a dummy element in the lfds600_queue */
+ element_count--;
+
+ if( element_count < vi->min_elements )
+ *lfds600_queue_validity = VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element_count > vi->max_elements )
+ *lfds600_queue_validity = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ /* TRD : now we validate the lfds600_freelist
+
+ we may be able to check for the expected number of
+ elements in the lfds600_freelist
+
+ if the caller has given us an expected min and max
+ number of elements in the lfds600_queue, then the total number
+ of elements in the lfds600_freelist, minus that min and max,
+ gives us the expected number of elements in the
+ lfds600_freelist
+ */
+
+ if( vi != NULL )
+ {
+ lfds600_freelist_query( qs->fs, LFDS600_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );
+
+ /* TRD : remember there is a dummy element in the lfds600_queue */
+ total_elements--;
+
+ lfds600_freelist_vi.min_elements = total_elements - vi->max_elements;
+ lfds600_freelist_vi.max_elements = total_elements - vi->min_elements;
+
+ lfds600_freelist_query( qs->fs, LFDS600_FREELIST_QUERY_VALIDATE, (void *) &lfds600_freelist_vi, (void *) lfds600_freelist_validity );
+ }
+
+ if( vi == NULL )
+ lfds600_freelist_query( qs->fs, LFDS600_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds600_freelist_validity );
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_queue_enqueue( struct lfds600_queue_state *qs, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_queue_element
+ *qe[LFDS600_QUEUE_PAC_SIZE];
+
+ assert( qs != NULL );
+ // TRD : user_data can be NULL
+
+ lfds600_queue_internal_new_element_from_freelist( qs, qe, user_data );
+
+ if( qe[LFDS600_QUEUE_POINTER] == NULL )
+ return( 0 );
+
+ lfds600_queue_internal_queue( qs, qe );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds600_queue_guaranteed_enqueue( struct lfds600_queue_state *qs, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_queue_element
+ *qe[LFDS600_QUEUE_PAC_SIZE];
+
+ assert( qs != NULL );
+ // TRD : user_data can be NULL
+
+ lfds600_queue_internal_guaranteed_new_element_from_freelist( qs, qe, user_data );
+
+ if( qe[LFDS600_QUEUE_POINTER] == NULL )
+ return( 0 );
+
+ lfds600_queue_internal_queue( qs, qe );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_queue_internal_queue( struct lfds600_queue_state *qs, struct lfds600_queue_element *qe[LFDS600_QUEUE_PAC_SIZE] )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_queue_element
+ *enqueue[LFDS600_QUEUE_PAC_SIZE],
+ *next[LFDS600_QUEUE_PAC_SIZE];
+
+ unsigned char
+ cas_result = 0;
+
+ assert( qs != NULL );
+ assert( qe != NULL );
+
+ do
+ {
+ enqueue[LFDS600_QUEUE_POINTER] = qs->enqueue[LFDS600_QUEUE_POINTER];
+ enqueue[LFDS600_QUEUE_COUNTER] = qs->enqueue[LFDS600_QUEUE_COUNTER];
+
+ next[LFDS600_QUEUE_POINTER] = enqueue[LFDS600_QUEUE_POINTER]->next[LFDS600_QUEUE_POINTER];
+ next[LFDS600_QUEUE_COUNTER] = enqueue[LFDS600_QUEUE_POINTER]->next[LFDS600_QUEUE_COUNTER];
+
+ /* TRD : this if() ensures that the next we read, just above,
+ really is from qs->enqueue (which we copied into enqueue)
+ */
+
+ if( qs->enqueue[LFDS600_QUEUE_POINTER] == enqueue[LFDS600_QUEUE_POINTER] and qs->enqueue[LFDS600_QUEUE_COUNTER] == enqueue[LFDS600_QUEUE_COUNTER] )
+ {
+ if( next[LFDS600_QUEUE_POINTER] == NULL )
+ {
+ qe[LFDS600_QUEUE_COUNTER] = next[LFDS600_QUEUE_COUNTER] + 1;
+ cas_result = lfds600_abstraction_dcas( (volatile lfds600_atom_t *) enqueue[LFDS600_QUEUE_POINTER]->next, (lfds600_atom_t *) qe, (lfds600_atom_t *) next );
+ }
+ else
+ {
+ next[LFDS600_QUEUE_COUNTER] = enqueue[LFDS600_QUEUE_COUNTER] + 1;
+ lfds600_abstraction_dcas( (volatile lfds600_atom_t *) qs->enqueue, (lfds600_atom_t *) next, (lfds600_atom_t *) enqueue );
+ }
+ }
+ }
+ while( cas_result == 0 );
+
+ qe[LFDS600_QUEUE_COUNTER] = enqueue[LFDS600_QUEUE_COUNTER] + 1;
+ lfds600_abstraction_dcas( (volatile lfds600_atom_t *) qs->enqueue, (lfds600_atom_t *) qe, (lfds600_atom_t *) enqueue );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds600_queue_dequeue( struct lfds600_queue_state *qs, void **user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_queue_element
+ *enqueue[LFDS600_QUEUE_PAC_SIZE],
+ *dequeue[LFDS600_QUEUE_PAC_SIZE],
+ *next[LFDS600_QUEUE_PAC_SIZE];
+
+ unsigned char
+ cas_result = 0;
+
+ int
+ rv = 1,
+ state = LFDS600_QUEUE_STATE_UNKNOWN,
+ finished_flag = LOWERED;
+
+ assert( qs != NULL );
+ assert( user_data != NULL );
+
+ do
+ {
+ dequeue[LFDS600_QUEUE_POINTER] = qs->dequeue[LFDS600_QUEUE_POINTER];
+ dequeue[LFDS600_QUEUE_COUNTER] = qs->dequeue[LFDS600_QUEUE_COUNTER];
+
+ enqueue[LFDS600_QUEUE_POINTER] = qs->enqueue[LFDS600_QUEUE_POINTER];
+ enqueue[LFDS600_QUEUE_COUNTER] = qs->enqueue[LFDS600_QUEUE_COUNTER];
+
+ next[LFDS600_QUEUE_POINTER] = dequeue[LFDS600_QUEUE_POINTER]->next[LFDS600_QUEUE_POINTER];
+ next[LFDS600_QUEUE_COUNTER] = dequeue[LFDS600_QUEUE_POINTER]->next[LFDS600_QUEUE_COUNTER];
+
+ /* TRD : confirm that dequeue didn't move between reading it
+ and reading its next pointer
+ */
+
+ if( dequeue[LFDS600_QUEUE_POINTER] == qs->dequeue[LFDS600_QUEUE_POINTER] and dequeue[LFDS600_QUEUE_COUNTER] == qs->dequeue[LFDS600_QUEUE_COUNTER] )
+ {
+ if( enqueue[LFDS600_QUEUE_POINTER] == dequeue[LFDS600_QUEUE_POINTER] and next[LFDS600_QUEUE_POINTER] == NULL )
+ state = LFDS600_QUEUE_STATE_EMPTY;
+
+ if( enqueue[LFDS600_QUEUE_POINTER] == dequeue[LFDS600_QUEUE_POINTER] and next[LFDS600_QUEUE_POINTER] != NULL )
+ state = LFDS600_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE;
+
+ if( enqueue[LFDS600_QUEUE_POINTER] != dequeue[LFDS600_QUEUE_POINTER] )
+ state = LFDS600_QUEUE_STATE_ATTEMPT_DEQUEUE;
+
+ switch( state )
+ {
+ case LFDS600_QUEUE_STATE_EMPTY:
+ *user_data = NULL;
+ rv = 0;
+ finished_flag = RAISED;
+ break;
+
+ case LFDS600_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE:
+ next[LFDS600_QUEUE_COUNTER] = enqueue[LFDS600_QUEUE_COUNTER] + 1;
+ lfds600_abstraction_dcas( (volatile lfds600_atom_t *) qs->enqueue, (lfds600_atom_t *) next, (lfds600_atom_t *) enqueue );
+ break;
+
+ case LFDS600_QUEUE_STATE_ATTEMPT_DEQUEUE:
+ *user_data = next[LFDS600_QUEUE_POINTER]->user_data;
+
+ next[LFDS600_QUEUE_COUNTER] = dequeue[LFDS600_QUEUE_COUNTER] + 1;
+ cas_result = lfds600_abstraction_dcas( (volatile lfds600_atom_t *) qs->dequeue, (lfds600_atom_t *) next, (lfds600_atom_t *) dequeue );
+
+ if( cas_result == 1 )
+ finished_flag = RAISED;
+ break;
+ }
+ }
+ }
+ while( finished_flag == LOWERED );
+
+ if( cas_result == 1 )
+ lfds600_freelist_push( qs->fs, dequeue[LFDS600_QUEUE_POINTER]->fe );
+
+ return( rv );
+}
+
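+
+/* a minimal usage sketch for the queue, kept in a comment so it does not affect
+ the library build; it assumes liblfds600.h is on the include path
+
+ #include "liblfds600.h"
+
+ int main( void )
+ {
+ struct lfds600_queue_state *qs;
+ int data = 42;
+ void *user_data;
+
+ if( lfds600_queue_new(&qs, 10) )
+ {
+ // enqueue a pointer to our data, then dequeue it again
+ lfds600_queue_enqueue( qs, &data );
+
+ if( lfds600_queue_dequeue(qs, &user_data) )
+ {
+ // user_data now points at data
+ }
+
+ lfds600_queue_delete( qs, NULL, NULL );
+ }
+
+ return( 0 );
+ }
+*/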
--- /dev/null
+#include "lfds600_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_ringbuffer_delete( struct lfds600_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ assert( rs != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ lfds600_queue_delete( rs->qs, NULL, NULL );
+
+ lfds600_freelist_delete( rs->fs, user_data_delete_function, user_state );
+
+ lfds600_abstraction_aligned_free( rs );
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_freelist_element *lfds600_ringbuffer_get_read_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element **fe )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+
+ lfds600_queue_dequeue( rs->qs, (void **) fe );
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_freelist_element *lfds600_ringbuffer_get_write_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element **fe, int *overwrite_flag )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+ // TRD : overwrite_flag can be NULL
+
+ /* TRD : we try to obtain an element from the lfds600_freelist
+ if we can, we populate it and add it to the lfds600_queue
+
+ if we cannot, then the lfds600_ringbuffer is full
+ so instead we grab the current read element and
+ use that instead
+
+ dequeue may fail since the lfds600_queue may be emptied
+ during our dequeue attempt
+
+ so what we actually do here is a loop, attempting
+ the lfds600_freelist and if it fails then a dequeue, until
+ we obtain an element
+
+ once we have an element, we queue it
+
+ you may be wondering why this operation is in a loop
+ remember - these operations are lock-free; anything
+ can happen in between
+
+ so for example the pop could fail because the lfds600_freelist
+ is empty; but by the time we go to get an element from
+ the lfds600_queue, the whole lfds600_queue has been emptied back into
+ the lfds600_freelist!
+
+ if overwrite_flag is provided, we set it to 0 if we
+ obtained a new element from the lfds600_freelist, 1 if we
+ stole an element from the lfds600_queue
+ */
+
+ do
+ {
+ if( overwrite_flag != NULL )
+ *overwrite_flag = 0;
+
+ lfds600_freelist_pop( rs->fs, fe );
+
+ if( *fe == NULL )
+ {
+ lfds600_ringbuffer_get_read_element( rs, fe );
+
+ if( overwrite_flag != NULL and *fe != NULL )
+ *overwrite_flag = 1;
+ }
+ }
+ while( *fe == NULL );
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_ringbuffer_put_read_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element *fe )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+
+ lfds600_freelist_push( rs->fs, fe );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_ringbuffer_put_write_element( struct lfds600_ringbuffer_state *rs, struct lfds600_freelist_element *fe )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+
+ lfds600_queue_enqueue( rs->qs, fe );
+
+ return;
+}
+
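+
+/* a minimal usage sketch for the ringbuffer, kept in a comment so it does not
+ affect the library build; it assumes liblfds600.h is on the include path and
+ that no per-element init or delete callbacks are needed
+
+ #include "liblfds600.h"
+
+ int main( void )
+ {
+ struct lfds600_ringbuffer_state *rs;
+ struct lfds600_freelist_element *fe;
+ int data = 42, overwrite_flag;
+ void *user_data;
+
+ if( lfds600_ringbuffer_new(&rs, 10, NULL, NULL) )
+ {
+ // write : obtain an element, set its user data, then make it available to readers
+ lfds600_ringbuffer_get_write_element( rs, &fe, &overwrite_flag );
+ lfds600_freelist_set_user_data_in_element( fe, &data );
+ lfds600_ringbuffer_put_write_element( rs, fe );
+
+ // read : obtain the oldest written element, read its user data, then return it
+ if( lfds600_ringbuffer_get_read_element(rs, &fe) != NULL )
+ {
+ lfds600_freelist_get_user_data_from_element( fe, &user_data );
+ lfds600_ringbuffer_put_read_element( rs, fe );
+ }
+
+ lfds600_ringbuffer_delete( rs, NULL, NULL );
+ }
+
+ return( 0 );
+ }
+*/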
--- /dev/null
+/***** the library wide include file *****/
+#include "lfds600_internal.h"
+
+/***** defines *****/
+
+/***** structures *****/
+#pragma pack( push, LFDS600_ALIGN_DOUBLE_POINTER )
+
+struct lfds600_ringbuffer_state
+{
+ struct lfds600_queue_state
+ *qs;
+
+ struct lfds600_freelist_state
+ *fs;
+};
+
+#pragma pack( pop )
+
+/***** externs *****/
+
+/***** private prototypes *****/
+void lfds600_ringbuffer_internal_validate( struct lfds600_ringbuffer_state *rs, struct lfds600_validation_info *vi, enum data_structure_validity *lfds600_queue_validity, enum data_structure_validity *lfds600_freelist_validity );
+
--- /dev/null
+#include "lfds600_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_ringbuffer_new( struct lfds600_ringbuffer_state **rs, lfds600_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( rs != NULL );
+ // TRD : number_elements can be any value in its range
+ // TRD : user_data_init_function can be NULL
+ // TRD : user_state can be NULL
+
+ *rs = (struct lfds600_ringbuffer_state *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_ringbuffer_state), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( *rs != NULL )
+ {
+ lfds600_freelist_new( &(*rs)->fs, number_elements, user_data_init_function, user_state );
+
+ if( (*rs)->fs != NULL )
+ {
+ lfds600_queue_new( &(*rs)->qs, number_elements );
+
+ if( (*rs)->qs != NULL )
+ rv = 1;
+
+ if( (*rs)->qs == NULL )
+ {
+ // queue creation failed, so tear down the freelist as well
+ lfds600_freelist_delete( (*rs)->fs, NULL, NULL );
+ lfds600_abstraction_aligned_free( *rs );
+ *rs = NULL;
+ }
+ }
+
+ // *rs may have been freed and set to NULL just above
+ if( *rs != NULL and (*rs)->fs == NULL )
+ {
+ lfds600_abstraction_aligned_free( *rs );
+ *rs = NULL;
+ }
+ }
+
+ return( rv );
+}
+
--- /dev/null
+#include "lfds600_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds600_ringbuffer_query( struct lfds600_ringbuffer_state *rs, enum lfds600_ringbuffer_query_type query_type, void *query_input, void *query_output )
+{
+ assert( rs != NULL );
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );
+
+ switch( query_type )
+ {
+ case LFDS600_RINGBUFFER_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ lfds600_ringbuffer_internal_validate( rs, (struct lfds600_validation_info *) query_input, (enum data_structure_validity *) query_output, ((enum data_structure_validity *) query_output)+2 );
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds600_ringbuffer_internal_validate( struct lfds600_ringbuffer_state *rs, struct lfds600_validation_info *vi, enum data_structure_validity *lfds600_queue_validity, enum data_structure_validity *lfds600_freelist_validity )
+{
+ assert( rs != NULL );
+ // TRD : vi can be NULL
+ assert( lfds600_queue_validity != NULL );
+ assert( lfds600_freelist_validity != NULL );
+
+ lfds600_queue_query( rs->qs, LFDS600_QUEUE_QUERY_VALIDATE, vi, lfds600_queue_validity );
+
+ if( vi != NULL )
+ {
+ struct lfds600_validation_info
+ lfds600_freelist_vi;
+
+ lfds600_atom_t
+ total_elements;
+
+ lfds600_freelist_query( rs->fs, LFDS600_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );
+ lfds600_freelist_vi.min_elements = total_elements - vi->max_elements;
+ lfds600_freelist_vi.max_elements = total_elements - vi->min_elements;
+ lfds600_freelist_query( rs->fs, LFDS600_FREELIST_QUERY_VALIDATE, (void *) &lfds600_freelist_vi, (void *) lfds600_freelist_validity );
+ }
+
+ if( vi == NULL )
+ lfds600_freelist_query( rs->fs, LFDS600_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds600_freelist_validity );
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_delete( struct lfds600_slist_state *ss )
+{
+ lfds600_slist_delete_all_elements( ss );
+
+ lfds600_abstraction_aligned_free( ss );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_delete_element( struct lfds600_slist_state *ss, struct lfds600_slist_element *se )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) void
+ *volatile user_data_and_flags[2],
+ *volatile new_user_data_and_flags[2];
+
+ unsigned char
+ cas_rv = 0;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ user_data_and_flags[LFDS600_SLIST_USER_DATA] = se->user_data_and_flags[LFDS600_SLIST_USER_DATA];
+ user_data_and_flags[LFDS600_SLIST_FLAGS] = se->user_data_and_flags[LFDS600_SLIST_FLAGS];
+
+ do
+ {
+ new_user_data_and_flags[LFDS600_SLIST_USER_DATA] = user_data_and_flags[LFDS600_SLIST_USER_DATA];
+ new_user_data_and_flags[LFDS600_SLIST_FLAGS] = (void *) ((lfds600_atom_t) user_data_and_flags[LFDS600_SLIST_FLAGS] | LFDS600_SLIST_FLAG_DELETED);
+ }
+ while( !((lfds600_atom_t) user_data_and_flags[LFDS600_SLIST_FLAGS] & LFDS600_SLIST_FLAG_DELETED) and 0 == (cas_rv = lfds600_abstraction_dcas((volatile lfds600_atom_t *) se->user_data_and_flags, (lfds600_atom_t *) new_user_data_and_flags, (lfds600_atom_t *) user_data_and_flags)) );
+
+ if( cas_rv == 1 )
+ if( ss->user_data_delete_function != NULL )
+ ss->user_data_delete_function( (void *) user_data_and_flags[LFDS600_SLIST_USER_DATA], ss->user_state );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_delete_all_elements( struct lfds600_slist_state *ss )
+{
+ struct lfds600_slist_element
+ *volatile se,
+ *volatile se_temp;
+
+ se = ss->head;
+
+ while( se != NULL )
+ {
+ // TRD : if a non-deleted element and there is a delete function, call the delete function
+ if( !((lfds600_atom_t) se->user_data_and_flags[LFDS600_SLIST_FLAGS] & LFDS600_SLIST_FLAG_DELETED) and ss->user_data_delete_function != NULL )
+ ss->user_data_delete_function( (void *) se->user_data_and_flags[LFDS600_SLIST_USER_DATA], ss->user_state );
+
+ se_temp = se;
+ se = se->next;
+ lfds600_abstraction_aligned_free( (void *) se_temp );
+ }
+
+ lfds600_slist_internal_init_slist( ss, ss->user_data_delete_function, ss->user_state );
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_slist_get_user_data_from_element( struct lfds600_slist_element *se, void **user_data )
+{
+ int
+ rv = 1;
+
+ assert( se != NULL );
+ assert( user_data != NULL );
+
+ *user_data = (void *) se->user_data_and_flags[LFDS600_SLIST_USER_DATA];
+
+ if( (lfds600_atom_t) se->user_data_and_flags[LFDS600_SLIST_FLAGS] & LFDS600_SLIST_FLAG_DELETED )
+ rv = 0;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds600_slist_set_user_data_in_element( struct lfds600_slist_element *se, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) volatile void
+ *user_data_and_flags[2],
+ *new_user_data_and_flags[2];
+
+ int
+ rv = 1;
+
+ assert( se != NULL );
+ // TRD : user_data can be NULL
+
+ user_data_and_flags[LFDS600_SLIST_USER_DATA] = se->user_data_and_flags[LFDS600_SLIST_USER_DATA];
+ user_data_and_flags[LFDS600_SLIST_FLAGS] = se->user_data_and_flags[LFDS600_SLIST_FLAGS];
+
+ new_user_data_and_flags[LFDS600_SLIST_USER_DATA] = user_data;
+
+ do
+ {
+ new_user_data_and_flags[LFDS600_SLIST_FLAGS] = user_data_and_flags[LFDS600_SLIST_FLAGS];
+ }
+ while( !((lfds600_atom_t) user_data_and_flags[LFDS600_SLIST_FLAGS] & LFDS600_SLIST_FLAG_DELETED) and 0 == lfds600_abstraction_dcas((volatile lfds600_atom_t *) se->user_data_and_flags, (lfds600_atom_t *) new_user_data_and_flags, (lfds600_atom_t *) user_data_and_flags) );
+
+ if( (lfds600_atom_t) user_data_and_flags[LFDS600_SLIST_FLAGS] & LFDS600_SLIST_FLAG_DELETED )
+ rv = 0;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_slist_element *lfds600_slist_get_head( struct lfds600_slist_state *ss, struct lfds600_slist_element **se )
+{
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ *se = (struct lfds600_slist_element *) ss->head;
+
+ lfds600_slist_internal_move_to_first_undeleted_element( se );
+
+ return( *se );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_slist_element *lfds600_slist_get_next( struct lfds600_slist_element *se, struct lfds600_slist_element **next_se )
+{
+ assert( se != NULL );
+ assert( next_se != NULL );
+
+ *next_se = (struct lfds600_slist_element *) se->next;
+
+ lfds600_slist_internal_move_to_first_undeleted_element( next_se );
+
+ return( *next_se );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_slist_element *lfds600_slist_get_head_and_then_next( struct lfds600_slist_state *ss, struct lfds600_slist_element **se )
+{
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ if( *se == NULL )
+ lfds600_slist_get_head( ss, se );
+ else
+ lfds600_slist_get_next( *se, se );
+
+ return( *se );
+}
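+
+/* TRD : illustrative usage sketch (not part of the library) - this is how
+         a caller would typically traverse the list with the function above;
+         starting with *se == NULL means "begin at the head", and the call
+         returns NULL once the list is exhausted
+
+           struct lfds600_slist_element
+             *se = NULL;
+
+           void
+             *user_data;
+
+           while( lfds600_slist_get_head_and_then_next(ss, &se) != NULL )
+             if( lfds600_slist_get_user_data_from_element(se, &user_data) )
+               ;  (use user_data here)
+*/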
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_internal_move_to_first_undeleted_element( struct lfds600_slist_element **se )
+{
+ assert( se != NULL );
+
+ while( *se != NULL and (lfds600_atom_t) (*se)->user_data_and_flags[LFDS600_SLIST_FLAGS] & LFDS600_SLIST_FLAG_DELETED )
+ (*se) = (struct lfds600_slist_element *) (*se)->next;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "lfds600_internal.h"
+
+/***** defines *****/
+#define LFDS600_SLIST_USER_DATA 0
+#define LFDS600_SLIST_FLAGS 1
+
+#define LFDS600_SLIST_NO_FLAGS 0x0
+#define LFDS600_SLIST_FLAG_DELETED 0x1
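+
+/* TRD : user_data_and_flags in an slist element is a two pointer array;
+         element [LFDS600_SLIST_USER_DATA] holds the user data and element
+         [LFDS600_SLIST_FLAGS] holds the flags; the two are always updated
+         together by a single DCAS, so setting the user data and logically
+         deleting an element cannot interleave
+*/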
+
+/***** structures *****/
+#pragma pack( push, LFDS600_ALIGN_SINGLE_POINTER )
+
+struct lfds600_slist_state
+{
+ struct lfds600_slist_element
+ *volatile head;
+
+ void
+ (*user_data_delete_function)( void *user_data, void *user_state ),
+ *user_state;
+};
+
+#pragma pack( pop )
+
+#pragma pack( push, LFDS600_ALIGN_DOUBLE_POINTER )
+
+/* TRD : this pragma pack doesn't seem to work under Windows
+ if the structure members are the correct way round
+ (next first), then user_data_and_flags ends up on
+ a single pointer boundary and DCAS crashes
+
+ accordingly, I've moved user_data_and_flags first
+*/
+
+struct lfds600_slist_element
+{
+ void
+ *volatile user_data_and_flags[2];
+
+ // TRD : requires volatile as is target of CAS
+ struct lfds600_slist_element
+ *volatile next;
+};
+
+#pragma pack( pop )
+
+/***** private prototypes *****/
+void lfds600_slist_internal_init_slist( struct lfds600_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+void lfds600_slist_internal_link_element_to_head( struct lfds600_slist_state *lfds600_slist_state, struct lfds600_slist_element *volatile se );
+void lfds600_slist_internal_link_element_after_element( struct lfds600_slist_element *volatile lfds600_slist_in_list_element, struct lfds600_slist_element *volatile se );
+
+void lfds600_slist_internal_move_to_first_undeleted_element( struct lfds600_slist_element **se );
+
--- /dev/null
+#include "lfds600_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_internal_link_element_to_head( struct lfds600_slist_state *ss, struct lfds600_slist_element *volatile se )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_SINGLE_POINTER) struct lfds600_slist_element
+ *se_next;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ se_next = ss->head;
+
+ do
+ {
+ se->next = se_next;
+ }
+ while( se->next != (se_next = (struct lfds600_slist_element *) lfds600_abstraction_cas((volatile lfds600_atom_t *) &ss->head, (lfds600_atom_t) se, (lfds600_atom_t) se->next)) );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_internal_link_element_after_element( struct lfds600_slist_element *volatile lfds600_slist_in_list_element, struct lfds600_slist_element *volatile se )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_SINGLE_POINTER) struct lfds600_slist_element
+ *se_prev,
+ *se_next;
+
+ assert( lfds600_slist_in_list_element != NULL );
+ assert( se != NULL );
+
+ se_prev = (struct lfds600_slist_element *) lfds600_slist_in_list_element;
+
+ se_next = se_prev->next;
+
+ do
+ {
+ se->next = se_next;
+ }
+ while( se->next != (se_next = (struct lfds600_slist_element *) lfds600_abstraction_cas((volatile lfds600_atom_t *) &se_prev->next, (lfds600_atom_t) se, (lfds600_atom_t) se->next)) );
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_slist_new( struct lfds600_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( ss != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ *ss = (struct lfds600_slist_state *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_slist_state), LFDS600_ALIGN_SINGLE_POINTER );
+
+ if( *ss != NULL )
+ {
+ lfds600_slist_internal_init_slist( *ss, user_data_delete_function, user_state );
+ rv = 1;
+ }
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_slist_internal_init_slist( struct lfds600_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ assert( ss != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ ss->head = NULL;
+ ss->user_data_delete_function = user_data_delete_function;
+ ss->user_state = user_state;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_slist_element *lfds600_slist_new_head( struct lfds600_slist_state *ss, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_SINGLE_POINTER) struct lfds600_slist_element
+ *volatile se;
+
+ assert( ss != NULL );
+ // TRD : user_data can be NULL
+
+ se = (struct lfds600_slist_element *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_slist_element), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( se != NULL )
+ {
+ se->user_data_and_flags[LFDS600_SLIST_USER_DATA] = user_data;
+ se->user_data_and_flags[LFDS600_SLIST_FLAGS] = LFDS600_SLIST_NO_FLAGS;
+
+ lfds600_slist_internal_link_element_to_head( ss, se );
+ }
+
+ return( (struct lfds600_slist_element *) se );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds600_slist_element *lfds600_slist_new_next( struct lfds600_slist_element *se, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_SINGLE_POINTER) struct lfds600_slist_element
+ *volatile se_next;
+
+ assert( se != NULL );
+ // TRD : user_data can be NULL
+
+ se_next = (struct lfds600_slist_element *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_slist_element), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( se_next != NULL )
+ {
+ se_next->user_data_and_flags[LFDS600_SLIST_USER_DATA] = user_data;
+ se_next->user_data_and_flags[LFDS600_SLIST_FLAGS] = LFDS600_SLIST_NO_FLAGS;
+
+ lfds600_slist_internal_link_element_after_element( se, se_next );
+ }
+
+ return( (struct lfds600_slist_element *) se_next );
+}
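+
+/* TRD : illustrative usage sketch (not part of the library) - building a
+         small two element list; error handling is omitted for brevity
+
+           struct lfds600_slist_state
+             *ss;
+
+           struct lfds600_slist_element
+             *se;
+
+           lfds600_slist_new( &ss, NULL, NULL );
+           se = lfds600_slist_new_head( ss, (void *) 1 );
+           lfds600_slist_new_next( se, (void *) 2 );
+*/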
+
--- /dev/null
+#include "lfds600_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds600_stack_delete( struct lfds600_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ void
+ *user_data;
+
+ assert( ss != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ while( lfds600_stack_pop(ss, &user_data) )
+ if( user_data_delete_function != NULL )
+ user_data_delete_function( user_data, user_state );
+
+ lfds600_freelist_delete( ss->fs, lfds600_stack_internal_freelist_delete_function, NULL );
+
+ lfds600_abstraction_aligned_free( ss );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_stack_clear( struct lfds600_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state )
+{
+ void
+ *user_data;
+
+ assert( ss != NULL );
+ // TRD : user_data_clear_function can be NULL
+ // TRD : user_state can be NULL
+
+ while( lfds600_stack_pop(ss, &user_data) )
+ if( user_data_clear_function != NULL )
+ user_data_clear_function( user_data, user_state );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds600_stack_internal_freelist_delete_function( void *user_data, void *user_state )
+{
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ lfds600_abstraction_aligned_free( user_data );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "lfds600_internal.h"
+
+/***** pragmas *****/
+
+/***** defines *****/
+#define LFDS600_STACK_POINTER 0
+#define LFDS600_STACK_COUNTER 1
+#define LFDS600_STACK_PAC_SIZE 2
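+
+/* TRD : top and next are pointer-and-counter pairs; the counter is taken
+         from aba_counter when an element is initialised, so the DCAS in
+         push and pop can distinguish a re-used pointer from an unchanged
+         one (the ABA problem)
+*/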
+
+/***** structures *****/
+#pragma pack( push, LFDS600_ALIGN_DOUBLE_POINTER )
+
+struct lfds600_stack_state
+{
+ // TRD : must come first for alignment
+ struct lfds600_stack_element
+ *volatile top[LFDS600_STACK_PAC_SIZE];
+
+ lfds600_atom_t
+ aba_counter;
+
+ struct lfds600_freelist_state
+ *fs;
+};
+
+struct lfds600_stack_element
+{
+ struct lfds600_stack_element
+ *next[LFDS600_STACK_PAC_SIZE];
+
+ struct lfds600_freelist_element
+ *fe;
+
+ void
+ *user_data;
+};
+
+#pragma pack( pop )
+
+/***** private prototypes *****/
+int lfds600_stack_internal_freelist_init_function( void **user_data, void *user_state );
+void lfds600_stack_internal_freelist_delete_function( void *user_data, void *user_state );
+
+void lfds600_stack_internal_new_element_from_freelist( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE], void *user_data );
+void lfds600_stack_internal_new_element( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE], void *user_data );
+void lfds600_stack_internal_init_element( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE], struct lfds600_freelist_element *fe, void *user_data );
+
+void lfds600_stack_internal_push( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE] );
+
--- /dev/null
+#include "lfds600_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_stack_new( struct lfds600_stack_state **ss, lfds600_atom_t number_elements )
+{
+ int
+ rv = 0;
+
+ assert( ss != NULL );
+ // TRD : number_elements can be any value in its range
+
+ *ss = (struct lfds600_stack_state *) lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_stack_state), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( *ss != NULL )
+ {
+ // TRD : the size of the lfds600_freelist is the size of the lfds600_stack
+ lfds600_freelist_new( &(*ss)->fs, number_elements, lfds600_stack_internal_freelist_init_function, NULL );
+
+ if( (*ss)->fs == NULL )
+ {
+ lfds600_abstraction_aligned_free( *ss );
+ *ss = NULL;
+ }
+ else
+ {
+ (*ss)->top[LFDS600_STACK_POINTER] = NULL;
+ (*ss)->top[LFDS600_STACK_COUNTER] = 0;
+ (*ss)->aba_counter = 0;
+ rv = 1;
+ }
+ }
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int lfds600_stack_internal_freelist_init_function( void **user_data, void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ *user_data = lfds600_abstraction_aligned_malloc( sizeof(struct lfds600_stack_element), LFDS600_ALIGN_DOUBLE_POINTER );
+
+ if( *user_data != NULL )
+ rv = 1;
+
+ return( rv );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds600_stack_internal_new_element_from_freelist( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE], void *user_data )
+{
+ struct lfds600_freelist_element
+ *fe;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds600_freelist_pop( ss->fs, &fe );
+
+ if( fe == NULL )
+ se[LFDS600_STACK_POINTER] = NULL;
+
+ if( fe != NULL )
+ lfds600_stack_internal_init_element( ss, se, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_stack_internal_new_element( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE], void *user_data )
+{
+ struct lfds600_freelist_element
+ *fe;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds600_freelist_guaranteed_pop( ss->fs, &fe );
+
+ if( fe == NULL )
+ se[LFDS600_STACK_POINTER] = NULL;
+
+ if( fe != NULL )
+ lfds600_stack_internal_init_element( ss, se, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_stack_internal_init_element( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE], struct lfds600_freelist_element *fe, void *user_data )
+{
+ assert( ss != NULL );
+ assert( se != NULL );
+ assert( fe != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds600_freelist_get_user_data_from_element( fe, (void **) &se[LFDS600_STACK_POINTER] );
+
+ se[LFDS600_STACK_COUNTER] = (struct lfds600_stack_element *) lfds600_abstraction_increment( (lfds600_atom_t *) &ss->aba_counter );
+
+ se[LFDS600_STACK_POINTER]->next[LFDS600_STACK_POINTER] = NULL;
+ se[LFDS600_STACK_POINTER]->next[LFDS600_STACK_COUNTER] = 0;
+ se[LFDS600_STACK_POINTER]->fe = fe;
+ se[LFDS600_STACK_POINTER]->user_data = user_data;
+
+ return;
+}
+
--- /dev/null
+#include "lfds600_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds600_stack_push( struct lfds600_stack_state *ss, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_stack_element
+ *se[LFDS600_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ // TRD : user_data can be NULL
+
+ lfds600_stack_internal_new_element_from_freelist( ss, se, user_data );
+
+ if( se[LFDS600_STACK_POINTER] == NULL )
+ return( 0 );
+
+ lfds600_stack_internal_push( ss, se );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds600_stack_guaranteed_push( struct lfds600_stack_state *ss, void *user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_stack_element
+ *se[LFDS600_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ // TRD : user_data can be NULL
+
+ /* TRD : this function allocates a new lfds600_freelist element and uses that
+ to push onto the lfds600_stack, guaranteeing success (unless malloc()
+ fails of course)
+ */
+
+ lfds600_stack_internal_new_element( ss, se, user_data );
+
+ // TRD : malloc failed
+ if( se[LFDS600_STACK_POINTER] == NULL )
+ return( 0 );
+
+ lfds600_stack_internal_push( ss, se );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds600_stack_internal_push( struct lfds600_stack_state *ss, struct lfds600_stack_element *se[LFDS600_STACK_PAC_SIZE] )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_stack_element
+ *original_se_next[LFDS600_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ original_se_next[LFDS600_STACK_POINTER] = ss->top[LFDS600_STACK_POINTER];
+ original_se_next[LFDS600_STACK_COUNTER] = ss->top[LFDS600_STACK_COUNTER];
+
+ do
+ {
+ se[LFDS600_STACK_POINTER]->next[LFDS600_STACK_POINTER] = original_se_next[LFDS600_STACK_POINTER];
+ se[LFDS600_STACK_POINTER]->next[LFDS600_STACK_COUNTER] = original_se_next[LFDS600_STACK_COUNTER];
+ }
+ while( 0 == lfds600_abstraction_dcas((volatile lfds600_atom_t *) ss->top, (lfds600_atom_t *) se, (lfds600_atom_t *) original_se_next) );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds600_stack_pop( struct lfds600_stack_state *ss, void **user_data )
+{
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) struct lfds600_stack_element
+ *se[LFDS600_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ assert( user_data != NULL );
+
+ se[LFDS600_STACK_COUNTER] = ss->top[LFDS600_STACK_COUNTER];
+ se[LFDS600_STACK_POINTER] = ss->top[LFDS600_STACK_POINTER];
+
+ do
+ {
+ if( se[LFDS600_STACK_POINTER] == NULL )
+ return( 0 );
+ }
+ while( 0 == lfds600_abstraction_dcas((volatile lfds600_atom_t *) ss->top, (lfds600_atom_t *) se[LFDS600_STACK_POINTER]->next, (lfds600_atom_t *) se) );
+
+ *user_data = se[LFDS600_STACK_POINTER]->user_data;
+
+ lfds600_freelist_push( ss->fs, se[LFDS600_STACK_POINTER]->fe );
+
+ return( 1 );
+}
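+
+/* TRD : illustrative usage sketch (not part of the library) - a typical
+         push/pop round trip; lfds600_stack_push() returns 0 when the
+         freelist is exhausted and lfds600_stack_pop() returns 0 when the
+         stack is empty; error handling is omitted for brevity
+
+           struct lfds600_stack_state
+             *ss;
+
+           void
+             *user_data;
+
+           lfds600_stack_new( &ss, 1000 );
+           lfds600_stack_push( ss, (void *) 1 );
+           lfds600_stack_pop( ss, &user_data );
+           lfds600_stack_delete( ss, NULL, NULL );
+*/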
+
--- /dev/null
+#include "lfds600_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds600_stack_query( struct lfds600_stack_state *ss, enum lfds600_stack_query_type query_type, void *query_input, void *query_output )
+{
+ assert( ss != NULL );
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );
+
+ switch( query_type )
+ {
+ case LFDS600_STACK_QUERY_ELEMENT_COUNT:
+ assert( query_input == NULL );
+
+ lfds600_freelist_query( ss->fs, LFDS600_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+building test
+=============
+
+Windows (user-mode)
+===================
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition
+ to load "test.sln".
+
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously
+ you'll need to have run the appropriate vcvars*.bat first; you can build
+ for IA64, 64-bit and 32-bit - just run the correct vcvars batch file).
+
+ Targets are "rel", "dbg" and "clean". You need to clean between switching
+ targets.
+
+Windows (kernel)
+================
+No build is supported, since the test program is a command-line utility.
+
+Linux
+=====
+Use GNUmake to run "makefile.linux". Targets are "rel", "dbg" and
+"clean". You need to clean between switching targets.
+
+
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = ../liblfds600/inc
+LIBDIR = ../liblfds600/bin
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>/dev/null 2>/dev/null
+
+##### sources, objects and libraries #####
+BINNAME = test
+BINARY = $(BINDIR)/$(BINNAME)
+SRCDIRS = .
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS = -lpthread -lc -lm
+USRLIBS = -llfds600
+
+##### CPU variants #####
+GCCARCH = $(shell uname -m)
+
+ifeq ($(GCCARCH),x86_64)
+ GCCARCH = core2
+endif
+
+ifeq ($(findstring arm,$(GCCARCH)),arm)
+ GCCARCH = armv6k
+endif
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+DG = gcc
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)"
+
+CC = gcc
+CFBASE = -Wall -Wno-unknown-pragmas -std=c99 -march=$(GCCARCH) -c -I"$(SRCDIR)" -I"$(INCDIR)"
+CFREL = -O2 -Wno-strict-aliasing
+CFDBG = -O0 -g
+
+LD = gcc
+LFBASE = -L"$(LIBDIR)"
+LFREL = -O2 -s
+LFDBG = -O0 -g
+
+##### variants #####
+CFLAGS = $(CFBASE) $(CFDBG)
+LFLAGS = $(LFBASE) $(LFDBG)
+
+ifeq ($(MAKECMDGOALS),rel)
+ CFLAGS = $(CFBASE) $(CFREL)
+ LFLAGS = $(LFBASE) $(LFREL)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS) -o $@ $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) -o $(BINARY) $(LFLAGS) $(OBJECTS) $(USRLIBS) $(SYSLIBS)
+ chmod +x $(BINARY)
+
+##### phony #####
+.PHONY : clean rel dbg
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d
+
+rel : $(BINARY)
+dbg : $(BINARY)
+
+##### dependencies #####
+-include $(OBJECTS:.o=.d)
+
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = ../liblfds600/inc
+LIBDIR = ../liblfds600/bin
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>nul 2>nul
+
+##### sources, objects and libraries #####
+BINNAME = test
+BINARY = $(BINDIR)\$(BINNAME).exe
+SRCDIRS = .
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS = kernel32.lib
+USRLIBS = liblfds600.lib
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+CC = cl
+CFBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCLUDE)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /D UNICODE /D _UNICODE /DWIN32_LEAN_AND_MEAN /D_CRT_SECURE_NO_WARNINGS
+CFREL = /Ox /DNDEBUG /MT
+CFDBG = /Od /Gm /Zi /D_DEBUG /MTd
+
+LD = link
+LFBASE = "/libpath:$(LIB)" "/libpath:$(LIBDIR)" /nologo /subsystem:console /nodefaultlib /nxcompat /wx
+LFREL = /incremental:no
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+
+##### variants #####
+CFLAGS = $(CFBASE) $(CFDBG)
+LFLAGS = $(LFBASE) $(LFDBG)
+CLIB = libcmtd.lib
+
+ifeq ($(MAKECMDGOALS),rel)
+ CFLAGS = $(CFBASE) $(CFREL)
+ LFLAGS = $(LFBASE) $(LFREL)
+ CLIB = libcmt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS) "/Fo$@" $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)
+
+##### phony #####
+.PHONY : clean rel dbg
+
+clean :
+ @erase /Q $(OBJDIR)\*.obj $(BINDIR)\$(BINNAME).* $(QUIETLY)
+
+rel : $(BINARY)
+dbg : $(BINARY)
+
--- /dev/null
+/***** defines *****/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ #include <windows.h>
+ typedef HANDLE thread_state_t;
+ typedef DWORD thread_return_t;
+ #define CALLING_CONVENTION WINAPI
+#endif
+
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ #include <wdm.h>
+ typedef HANDLE thread_state_t;
+ typedef VOID thread_return_t;
+ #define CALLING_CONVENTION
+#endif
+
+#if (defined __unix__ && __GNUC__)
+ /* TRD : any UNIX on any CPU with GCC
+
+ __unix__ indicates Solaris, Linux, HPUX, etc
+ __GNUC__ indicates GCC
+ */
+
+ #include <unistd.h>
+ #include <pthread.h>
+ typedef pthread_t thread_state_t;
+ typedef void * thread_return_t;
+ #define CALLING_CONVENTION
+#endif
+
+typedef thread_return_t (CALLING_CONVENTION *thread_function_t)( void *thread_user_state );
+
+/***** public prototypes *****/
+unsigned int abstraction_cpu_count( void );
+int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state );
+void abstraction_thread_wait( thread_state_t thread_state );
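+
+/* TRD : illustrative usage sketch (not part of the test suite) - a thread
+         function must use the thread_return_t and CALLING_CONVENTION types
+         above so the same code compiles on Windows and UNIX; the function
+         name here is purely an example
+
+           thread_return_t CALLING_CONVENTION example_thread( void *thread_user_state )
+           {
+             return( (thread_return_t) EXIT_SUCCESS );
+           }
+
+           thread_state_t
+             thread;
+
+           abstraction_thread_start( &thread, 0, example_thread, NULL );
+           abstraction_thread_wait( thread );
+*/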
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ unsigned int abstraction_cpu_count()
+ {
+ SYSTEM_INFO
+ si;
+
+ GetNativeSystemInfo( &si );
+
+ return( (unsigned int) si.dwNumberOfProcessors );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ unsigned int abstraction_cpu_count()
+ {
+ unsigned int
+ active_processor_count;
+
+ active_processor_count = KeQueryActiveProcessorCount( NULL );
+
+ return( active_processor_count );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __linux__ && __GNUC__)
+
+ /* TRD : Linux on any CPU with GCC
+
+ I believe this sysconf() call is Linux specific and varies by UNIX flavour
+
+ __linux__ indicates Linux
+ __GNUC__ indicates GCC
+ */
+
+ unsigned int abstraction_cpu_count()
+ {
+ long int
+ cpu_count;
+
+ cpu_count = sysconf( _SC_NPROCESSORS_ONLN );
+
+ return( (unsigned int) cpu_count );
+ }
+
+#endif
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )
+ {
+ int
+ rv = 0;
+
+ DWORD
+ thread_id;
+
+ DWORD_PTR
+ affinity_mask,
+ result;
+
+ assert( thread_state != NULL );
+ // TRD : cpu can be any value in its range
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ affinity_mask = (DWORD_PTR) 1 << cpu;
+
+ *thread_state = CreateThread( NULL, 0, thread_function, thread_user_state, NO_FLAGS, &thread_id );
+
+ result = SetThreadAffinityMask( *thread_state, affinity_mask );
+
+ if( *thread_state != NULL and result != 0 )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )
+ {
+ int
+ rv = 0;
+
+ KAFFINITY
+ affinity_mask;
+
+ NTSTATUS
+ nts_create,
+ nts_affinity;
+
+ assert( thread_state != NULL );
+ // TRD : cpu can be any value in its range
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ affinity_mask = (KAFFINITY) 1 << cpu;
+
+ nts_create = PsCreateSystemThread( thread_state, THREAD_ALL_ACCESS, NULL, NULL, NULL, thread_function, thread_user_state );
+
+ nts_affinity = ZwSetInformationThread( *thread_state, ThreadAffinityMask, &affinity_mask, sizeof(KAFFINITY) );
+
+ if( nts_create == STATUS_SUCCESS and nts_affinity == STATUS_SUCCESS )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __unix__)
+
+ /* TRD : any UNIX on any CPU with any compiler
+
+ I assumed pthreads is available on any UNIX.
+
+ __unix__ indicates Solaris, Linux, HPUX, etc
+ */
+
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )
+ {
+ int
+ rv = 0,
+ rv_create;
+
+ assert( thread_state != NULL );
+ // TRD : cpu can be any value in its range
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ rv_create = pthread_create( thread_state, NULL, thread_function, thread_user_state );
+
+ if( rv_create == 0 )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ void abstraction_thread_wait( thread_state_t thread_state )
+ {
+ WaitForSingleObject( thread_state, INFINITE );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ void abstraction_thread_wait( thread_state_t thread_state )
+ {
+ KeWaitForSingleObject( thread_state, Executive, KernelMode, FALSE, NULL );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __unix__)
+
+ /* TRD : any UNIX on any CPU with any compiler
+
+ I assumed pthreads is available on any UNIX.
+
+ __unix__ indicates Solaris, Linux, HPUX, etc
+ */
+
+ void abstraction_thread_wait( thread_state_t thread_state )
+ {
+ pthread_join( thread_state, NULL );
+
+ return;
+ }
+
+#endif
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds600_freelist( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds600_freelist_state
+ *fs;
+
+ struct lfds600_freelist_benchmark
+ *fb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds600_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the freelist
+
+ the benchmark is to have a single freelist
+ where a worker thread busy-works popping and then pushing
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ fb = (struct lfds600_freelist_benchmark *) malloc( sizeof(struct lfds600_freelist_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %d Freelist Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS600_RELEASE_NUMBER );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds600_freelist_new( &fs, 1000, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (fb+loop)->fs = fs;
+ (fb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds600_freelist_thread_pop_and_push, fb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (fb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (fb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
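+ // TRD : scalability == 1.0 means N threads do N times the work of one
+ //       thread; values below 1.0 show the cost of contention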
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds600_freelist_delete( fs, NULL, NULL );
+ }
+
+ free( fb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds600_freelist_thread_pop_and_push( void *lfds600_freelist_benchmark )
+{
+ struct lfds600_freelist_benchmark
+ *fb;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( lfds600_freelist_benchmark != NULL );
+
+ fb = (struct lfds600_freelist_benchmark *) lfds600_freelist_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_freelist_pop( fb->fs, &fe );
+ lfds600_freelist_push( fb->fs, fe );
+
+ fb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds600_queue( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds600_queue_state
+ *qs;
+
+ struct lfds600_queue_benchmark
+ *qb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds600_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the queue
+
+ the benchmark is to have a single queue
+ where a worker thread busy-works dequeuing and then queuing
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ qb = (struct lfds600_queue_benchmark *) malloc( sizeof(struct lfds600_queue_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %d Queue Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS600_RELEASE_NUMBER );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds600_queue_new( &qs, 1000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qb+loop)->qs = qs;
+ (qb+loop)->operation_count = 0;
+ }
+
+ // TRD : populate the queue (we don't actually use the user data)
+ for( loop = 0 ; loop < 500 ; loop++ )
+ lfds600_queue_enqueue( qs, (void *) (lfds600_atom_t) loop );
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds600_queue_thread_dequeue_and_enqueue, qb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (qb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (qb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds600_queue_delete( qs, NULL, NULL );
+ }
+
+ free( qb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds600_queue_thread_dequeue_and_enqueue( void *lfds600_queue_benchmark )
+{
+ struct lfds600_queue_benchmark
+ *qb;
+
+ void
+ *user_data;
+
+ time_t
+ start_time;
+
+ assert( lfds600_queue_benchmark != NULL );
+
+ qb = (struct lfds600_queue_benchmark *) lfds600_queue_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_queue_dequeue( qb->qs, &user_data );
+ lfds600_queue_enqueue( qb->qs, user_data );
+
+ qb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds600_ringbuffer( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ struct lfds600_ringbuffer_benchmark
+ *rb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds600_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the ringbuffer
+
+ the benchmark is to have a single ringbuffer
+ where a worker thread busy-works writing and then reading
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ rb = (struct lfds600_ringbuffer_benchmark *) malloc( sizeof(struct lfds600_ringbuffer_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %d Ringbuffer Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS600_RELEASE_NUMBER );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds600_ringbuffer_new( &rs, 1000, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rb+loop)->rs = rs;
+ (rb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds600_ringbuffer_thread_write_and_read, rb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (rb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (rb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds600_ringbuffer_delete( rs, NULL, NULL );
+ }
+
+ free( rb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds600_ringbuffer_thread_write_and_read( void *lfds600_ringbuffer_benchmark )
+{
+ struct lfds600_ringbuffer_benchmark
+ *rb;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( lfds600_ringbuffer_benchmark != NULL );
+
+ rb = (struct lfds600_ringbuffer_benchmark *) lfds600_ringbuffer_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_ringbuffer_get_write_element( rb->rs, &fe, NULL );
+ lfds600_ringbuffer_put_write_element( rb->rs, fe );
+
+ lfds600_ringbuffer_get_read_element( rb->rs, &fe );
+ lfds600_ringbuffer_put_read_element( rb->rs, fe );
+
+ rb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds600_stack( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds600_stack_state
+ *ss;
+
+ struct lfds600_stack_benchmark
+ *sb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds600_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the stack
+
+ the benchmark is to have a single stack
+ where a worker thread busy-works pushing then popping
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ sb = (struct lfds600_stack_benchmark *) malloc( sizeof(struct lfds600_stack_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %d Stack Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS600_RELEASE_NUMBER );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds600_stack_new( &ss, 1000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (sb+loop)->ss = ss;
+ (sb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds600_stack_thread_push_and_pop, sb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (sb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (sb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds600_stack_delete( ss, NULL, NULL );
+ }
+
+ free( sb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds600_stack_thread_push_and_pop( void *lfds600_stack_benchmark )
+{
+ struct lfds600_stack_benchmark
+ *sb;
+
+ void
+ *user_data = NULL;
+
+ time_t
+ start_time;
+
+ assert( lfds600_stack_benchmark != NULL );
+
+ sb = (struct lfds600_stack_benchmark *) lfds600_stack_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_stack_push( sb->ss, user_data );
+ lfds600_stack_pop( sb->ss, &user_data );
+
+ sb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** ANSI includes *****/
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+/***** internal includes *****/
+#include "abstraction.h"
+
+/***** external includes *****/
+#include "liblfds600.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
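+
+// TRD : "and" and "or" above are the iso646.h alternative spellings of
+//       && and ||; the test code uses them for readability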
+
+#define RAISED 1
+#define LOWERED 0
+
+#define NO_FLAGS 0x0
+
+/***** enums *****/
+enum lfds600_test_operation
+{
+ UNKNOWN,
+ HELP,
+ TEST,
+ BENCHMARK
+};
+
+/***** structs *****/
+#include "structures.h"
+
+/***** prototypes *****/
+int main( int argc, char **argv );
+
+void internal_display_test_name( char *test_name );
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... );
+void internal_display_data_structure_validity( enum data_structure_validity dvs );
+
+void benchmark_lfds600_freelist( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds600_freelist_thread_pop_and_push( void *lfds600_freelist_benchmark );
+
+void benchmark_lfds600_queue( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds600_queue_thread_dequeue_and_enqueue( void *lfds600_queue_benchmark );
+
+void benchmark_lfds600_ringbuffer( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds600_ringbuffer_thread_write_and_read( void *lfds600_ringbuffer_benchmark );
+
+void benchmark_lfds600_stack( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds600_stack_thread_push_and_pop( void *lfds600_stack_benchmark );
+
+void test_lfds600_abstraction( void );
+ void abstraction_test_increment( void );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter );
+ void abstraction_test_dcas( void );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state );
+
+void test_lfds600_freelist( void );
+ void freelist_test_internal_popping( void );
+ int freelist_test_internal_popping_init( void **user_data, void *user_state );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state );
+ void freelist_test_internal_pushing( void );
+ int freelist_test_internal_pushing_init( void **user_data, void *user_state );
+ void freelist_test_internal_pushing_delete( void *user_data, void *user_state );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state );
+ void freelist_test_internal_popping_and_pushing( void );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state );
+ void freelist_test_internal_rapid_popping_and_pushing( void );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds600_freelist_state );
+
+void test_lfds600_queue( void );
+ void queue_test_enqueuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state );
+ void queue_test_dequeuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state );
+ void queue_test_enqueuing_and_dequeuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );
+ void queue_test_rapid_enqueuing_and_dequeuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );
+
+void test_lfds600_ringbuffer( void );
+ void ringbuffer_test_reading( void );
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state );
+ void ringbuffer_test_writing( void );
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state );
+ void ringbuffer_test_reading_and_writing( void );
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state );
+
+void test_lfds600_slist( void );
+ thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_head_writer( void *lfds600_slist_thread_start_state );
+ thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_after_writer( void *lfds600_slist_thread_start_state );
+ thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_traverser( void *lfds600_slist_thread_start_state );
+ thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_deleter_traverser( void *lfds600_slist_thread_start_state );
+
+void test_lfds600_stack( void );
+ thread_return_t CALLING_CONVENTION lfds600_stack_internal_thread_reader( void *lfds600_stack_state );
+ thread_return_t CALLING_CONVENTION lfds600_stack_internal_thread_writer( void *lfds600_stack_state );
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+int main( int argc, char **argv )
+{
+ enum lfds600_test_operation
+ operation = UNKNOWN;
+
+ unsigned int
+ loop,
+ iterations = 1;
+
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ if( argc == 1 or argc >= 4 )
+ operation = HELP;
+
+ if( operation == UNKNOWN )
+ {
+ if( 0 == strcmp(*(argv+1), "test") )
+ {
+ operation = TEST;
+
+ // TRD : sscanf() may fail, but iterations is initialised to 1, so it's okay
+ if( argc == 3 )
+ sscanf( *(argv+2), "%u", &iterations );
+ }
+
+ if( 0 == strcmp(*(argv+1), "benchmark") )
+ operation = BENCHMARK;
+ }
+
+ switch( operation )
+ {
+ case UNKNOWN:
+ case HELP:
+ printf( "test [test|benchmark] [iterations]\n"
+ " test : run the test suite\n"
+ " benchmark : run the benchmark suite\n"
+ " iterations : optional, only applies to tests, default is 1\n" );
+ break;
+
+ case TEST:
+ for( loop = 1 ; loop < iterations+1 ; loop++ )
+ {
+ printf( "\n"
+ "Test Iteration %02u\n"
+ "=================\n", loop );
+
+ test_lfds600_abstraction();
+ test_lfds600_freelist();
+ test_lfds600_queue();
+ test_lfds600_ringbuffer();
+ test_lfds600_slist();
+ test_lfds600_stack();
+ }
+ break;
+
+ case BENCHMARK:
+ benchmark_lfds600_freelist();
+ benchmark_lfds600_queue();
+ benchmark_lfds600_ringbuffer();
+ benchmark_lfds600_stack();
+ break;
+ }
+
+ return( EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void internal_display_test_name( char *test_name )
+{
+ assert( test_name != NULL );
+
+ printf( "%s...", test_name );
+ fflush( stdout );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... )
+{
+ va_list
+ va;
+
+ int
+ passed_flag = RAISED;
+
+ unsigned int
+ loop;
+
+ char
+ *name;
+
+ enum data_structure_validity
+ dvs;
+
+ // TRD : number_name_dvs_pairs can be any value in its range
+
+ va_start( va, number_name_dvs_pairs );
+
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )
+ {
+ name = va_arg( va, char * );
+ dvs = va_arg( va, enum data_structure_validity );
+
+ if( dvs != VALIDITY_VALID )
+ {
+ passed_flag = LOWERED;
+ break;
+ }
+ }
+
+ va_end( va );
+
+ if( passed_flag == RAISED )
+ puts( "passed" );
+
+ if( passed_flag == LOWERED )
+ {
+ printf( "failed (" );
+
+ va_start( va, number_name_dvs_pairs );
+
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )
+ {
+ name = va_arg( va, char * );
+ dvs = va_arg( va, enum data_structure_validity );
+
+ printf( "%s ", name );
+ internal_display_data_structure_validity( dvs );
+
+ if( loop+1 < number_name_dvs_pairs )
+ printf( ", " );
+ }
+
+ va_end( va );
+
+ printf( ")\n" );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_display_data_structure_validity( enum data_structure_validity dvs )
+{
+ char
+ *string = NULL;
+
+ switch( dvs )
+ {
+ case VALIDITY_VALID:
+ string = "valid";
+ break;
+
+ case VALIDITY_INVALID_LOOP:
+ string = "invalid - loop detected";
+ break;
+
+ case VALIDITY_INVALID_MISSING_ELEMENTS:
+ string = "invalid - missing elements";
+ break;
+
+ case VALIDITY_INVALID_ADDITIONAL_ELEMENTS:
+ string = "invalid - additional elements";
+ break;
+
+ case VALIDITY_INVALID_TEST_DATA:
+ string = "invalid - invalid test data";
+ break;
+ }
+
+ printf( "%s", string );
+
+ return;
+}
+
--- /dev/null
+/***** structs *****/
+#pragma pack( push, LFDS600_ALIGN_DOUBLE_POINTER )
+
+/***** abstraction tests *****/
+struct abstraction_test_dcas_state
+{
+ volatile lfds600_atom_t
+ *shared_counter;
+
+ lfds600_atom_t
+ local_counter;
+};
+
+/***** freelist tests *****/
+struct freelist_test_popping_state
+{
+ struct lfds600_freelist_state
+ *fs,
+ *fs_thread_local;
+};
+
+struct freelist_test_pushing_state
+{
+ lfds600_atom_t
+ thread_number;
+
+ struct lfds600_freelist_state
+ *source_fs,
+ *fs;
+};
+
+struct freelist_test_popping_and_pushing_state
+{
+ struct lfds600_freelist_state
+ *local_fs,
+ *fs;
+};
+
+struct freelist_test_counter_and_thread_number
+{
+ lfds600_atom_t
+ thread_number;
+
+ unsigned long long int
+ counter;
+};
+
+/***** queue tests *****/
+struct queue_test_enqueuing_state
+{
+ struct lfds600_queue_state
+ *qs;
+
+ lfds600_atom_t
+ counter;
+};
+
+struct queue_test_dequeuing_state
+{
+ struct lfds600_queue_state
+ *qs;
+
+ int
+ error_flag;
+};
+
+struct queue_test_enqueuing_and_dequeuing_state
+{
+ struct lfds600_queue_state
+ *qs;
+
+ lfds600_atom_t
+ counter,
+ thread_number,
+ *per_thread_counters;
+
+ unsigned int
+ cpu_count;
+
+ int
+ error_flag;
+};
+
+struct queue_test_rapid_enqueuing_and_dequeuing_state
+{
+ struct lfds600_queue_state
+ *qs;
+
+ lfds600_atom_t
+ counter;
+};
+
+/***** ringbuffer tests *****/
+struct ringbuffer_test_reading_state
+{
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ int
+ error_flag;
+
+ lfds600_atom_t
+ read_count;
+};
+
+struct ringbuffer_test_writing_state
+{
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ lfds600_atom_t
+ write_count;
+};
+
+struct ringbuffer_test_reading_and_writing_state
+{
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ lfds600_atom_t
+ counter,
+ *per_thread_counters;
+
+ unsigned int
+ cpu_count;
+
+ int
+ error_flag;
+};
+
+/***** slist tests *****/
+struct lfds600_slist_thread_start_state
+{
+ struct lfds600_slist_state
+ *ss;
+
+ struct lfds600_slist_element
+ *se;
+
+ time_t
+ duration;
+
+ unsigned long int
+ iteration_modulo;
+};
+
+/***** stack tests *****/
+
+/***** freelist benchmarks *****/
+struct lfds600_freelist_benchmark
+{
+ struct lfds600_freelist_state
+ *fs;
+
+ lfds600_atom_t
+ operation_count;
+};
+
+/***** queue benchmarks *****/
+struct lfds600_queue_benchmark
+{
+ struct lfds600_queue_state
+ *qs;
+
+ lfds600_atom_t
+ operation_count;
+};
+
+/***** ringbuffer benchmarks *****/
+struct lfds600_ringbuffer_benchmark
+{
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ lfds600_atom_t
+ operation_count;
+};
+
+/***** stack benchmarks *****/
+struct lfds600_stack_benchmark
+{
+ struct lfds600_stack_state
+ *ss;
+
+ lfds600_atom_t
+ operation_count;
+};
+
+#pragma pack( pop )
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds600_abstraction( void )
+{
+ printf( "\n"
+ "Abstraction Tests\n"
+ "=================\n" );
+
+ abstraction_test_increment();
+ abstraction_test_dcas();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_increment( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds600_atom_t
+ shared_counter = 0,
+ atomic_shared_counter = 0;
+
+ /* TRD : here we test lfds600_abstraction_increment
+
+ first, we run one thread per CPU where each thread increments
+ a shared counter 10,000,000 times - however, this first test
+ does NOT use atomic increment; it uses "++"
+
+ second, we repeat the exercise, but this time using
+ lfds600_abstraction_increment()
+
+ if the final value in the first test is less than (10,000,000*cpu_count)
+ then the system is sensitive to non-atomic increments; this means if
+ our atomic version of the test passes, we can have some degree of confidence
+ that it works
+
+ if the final value in the first test is in fact correct, then we can't know
+ that our atomic version has changed anything
+
+ and of course if the final value in the atomic test is wrong, we know things
+ are broken
+ */
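+
+ /* TRD : for example, on a four-CPU machine the atomic run must finish at
+ exactly 40,000,000; the non-atomic run will typically fall short,
+ because concurrent "++" read-modify-write operations on the shared
+ counter lose updates
+ */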
+
+ internal_display_test_name( "Atomic increment" );
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ // TRD : non-atomic
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_increment, &shared_counter );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : atomic
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_atomic_increment, &atomic_shared_counter );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ if( shared_counter < (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
+ puts( "passed" );
+
+ if( shared_counter == (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
+ puts( "indeterminate" );
+
+ if( atomic_shared_counter < (10000000 * cpu_count) )
+ puts( "failed" );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter )
+{
+ volatile lfds600_atom_t
+ count = 0;
+
+ /* TRD : lfds600_atom_t must be volatile or the compiler
+ optimizes it away into a single store
+ */
+
+ assert( shared_counter != NULL );
+
+ while( count++ < 10000000 )
+ (*(lfds600_atom_t *) shared_counter)++;
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter )
+{
+ lfds600_atom_t
+ count = 0;
+
+ assert( shared_counter != NULL );
+
+ while( count++ < 10000000 )
+ lfds600_abstraction_increment( shared_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_dcas( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct abstraction_test_dcas_state
+ *atds;
+
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) volatile lfds600_atom_t
+ shared_counter[2] = { 0, 0 };
+
+ lfds600_atom_t
+ local_total = 0;
+
+ /* TRD : here we test lfds600_abstraction_dcas
+
+ we run one thread per CPU
+ we use lfds600_abstraction_dcas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ the threads run for ten seconds
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
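+
+ /* TRD : note shared_counter is a double-pointer-aligned pair of contiguous
+ atoms, since lfds600_abstraction_dcas() operates on two words at
+ once; only the first word is used as the counter here, the second
+ word is simply left at zero
+ */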
+
+ internal_display_test_name( "Atomic DCAS" );
+
+ cpu_count = abstraction_cpu_count();
+
+ atds = malloc( sizeof(struct abstraction_test_dcas_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (atds+loop)->shared_counter = shared_counter;
+ (atds+loop)->local_counter = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_dcas, atds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ local_total += (atds+loop)->local_counter;
+
+ if( local_total == shared_counter[0] )
+ puts( "passed" );
+
+ if( local_total != shared_counter[0] )
+ puts( "failed" );
+
+ // TRD : cleanup
+ free( atds );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state )
+{
+ struct abstraction_test_dcas_state
+ *atds;
+
+ time_t
+ start_time;
+
+ LFDS600_ALIGN(LFDS600_ALIGN_DOUBLE_POINTER) lfds600_atom_t
+ exchange[2],
+ compare[2];
+
+ assert( abstraction_test_dcas_state != NULL );
+
+ atds = (struct abstraction_test_dcas_state *) abstraction_test_dcas_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ compare[0] = *atds->shared_counter;
+ compare[1] = *(atds->shared_counter+1);
+
+ do
+ {
+ exchange[0] = compare[0] + 1;
+ exchange[1] = compare[1];
+ }
+ while( 0 == lfds600_abstraction_dcas(atds->shared_counter, exchange, compare) );
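+
+ /* TRD : the retry loop above relies on lfds600_abstraction_dcas() writing
+ the value it observed back into compare[] when the swap fails
+ (cmpxchg-style behaviour), so each retry recomputes exchange[]
+ from a fresh copy of the shared counter
+ */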
+
+ atds->local_counter++;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds600_freelist( void )
+{
+ printf( "\n"
+ "Freelist Tests\n"
+ "==============\n" );
+
+ freelist_test_internal_popping();
+ freelist_test_internal_pushing();
+ freelist_test_internal_popping_and_pushing();
+ freelist_test_internal_rapid_popping_and_pushing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_popping( void )
+{
+ unsigned int
+ loop,
+ cpu_count,
+ count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum data_structure_validity
+ dvs = VALIDITY_VALID;
+
+ struct lfds600_freelist_state
+ *fs;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ struct freelist_test_popping_state
+ *ftps;
+
+ unsigned int
+ *found_count;
+
+ /* TRD : we create a freelist with 1,000,000 elements
+
+ the creation function runs in a single thread and creates
+ and pushes those elements onto the freelist
+
+ each element contains a void pointer which is its element number
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each popped element is pushed onto a thread-local freelist
+
+ the threads run till the source freelist is empty
+
+ we then check the thread-local freelists
+ we should find we have every element
+
+ then tidy up
+ */
+
+ internal_display_test_name( "Popping" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_freelist_new( &fs, 1000000, freelist_test_internal_popping_init, NULL );
+ ftps = malloc( sizeof(struct freelist_test_popping_state) * cpu_count );
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (ftps+loop)->fs = fs;
+ lfds600_freelist_new( &(ftps+loop)->fs_thread_local, 0, NULL, NULL );
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping, ftps+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now we check the thread-local freelists
+ found_count = malloc( sizeof(unsigned int) * 1000000 );
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ *(found_count+loop) = 0;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ while( lfds600_freelist_pop((ftps+loop)->fs_thread_local, &fe) )
+ {
+ lfds600_freelist_get_user_data_from_element( fe, (void **) &count );
+ (*(found_count+count))++;
+ lfds600_freelist_push( fs, fe );
+ }
+ }
+
+ for( loop = 0 ; loop < 1000000 and dvs == VALIDITY_VALID ; loop++ )
+ {
+ if( *(found_count+loop) == 0 )
+ dvs = VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( *(found_count+loop) > 1 )
+ dvs = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ // TRD : cleanup
+ free( found_count );
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ lfds600_freelist_delete( (ftps+loop)->fs_thread_local, NULL, NULL );
+ lfds600_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int freelist_test_internal_popping_init( void **user_data, void *user_state )
+{
+ static lfds600_atom_t
+ count = 0;
+
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
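+ // TRD : the element number is stored directly in the user data void pointer
+ //       (as an integer value), not in separately allocated memory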
+ *(lfds600_atom_t *) user_data = count++;
+
+ return( 1 );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state )
+{
+ struct freelist_test_popping_state
+ *ftps;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ assert( freelist_test_popping_state != NULL );
+
+ ftps = (struct freelist_test_popping_state *) freelist_test_popping_state;
+
+ while( lfds600_freelist_pop(ftps->fs, &fe) )
+ lfds600_freelist_push( ftps->fs_thread_local, fe );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum data_structure_validity
+ dvs;
+
+ struct freelist_test_pushing_state
+ *ftps;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ struct lfds600_freelist_state
+ *fs,
+ *cleanup_fs;
+
+ struct freelist_test_counter_and_thread_number
+ *cnt,
+ *counter_and_number_trackers;
+
+ struct lfds600_validation_info
+ vi = { 1000000, 1000000 };
+
+ /* TRD : we create an empty freelist, which we will push to
+
+ we then create one freelist per CPU, where each such freelist
+ contains 1,000,000/cpu_count elements and each element carries
+ an incrementing counter and a unique thread ID
+ (from 0 to cpu_count-1)
+
+ we then start one thread per CPU, where each thread is
+ given one of the populated freelists and pops from that
+ to push to the empty freelist
+
+ the reason for this is to achieve memory pre-allocation
+ which allows the pushing threads to run at maximum speed
+
+ the threads end when their freelists are empty
+
+ we then fully pop the now populated main freelist (onto
+ a second freelist, so we can cleanly free all memory),
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals 1,000,000
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
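+
+ /* TRD : the strictly-incrementing check works because the freelist behaves
+ as a stack (LIFO); each source freelist is created with ascending
+ counters, so a pushing thread pops them in descending order and
+ pushes them onto the main freelist in that order, which means the
+ final verification pops each thread's elements back off in
+ ascending order again
+ */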
+
+ internal_display_test_name( "Pushing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ ftps = malloc( sizeof(struct freelist_test_pushing_state) * cpu_count );
+
+ lfds600_freelist_new( &fs, 0, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (ftps+loop)->thread_number = (lfds600_atom_t) loop;
+ lfds600_freelist_new( &(ftps+loop)->source_fs, 1000000 / cpu_count, freelist_test_internal_pushing_init, (void *) (lfds600_atom_t) loop );
+ (ftps+loop)->fs = fs;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_pushing, ftps+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now fully pop and verify the main freelist
+ lfds600_freelist_new( &cleanup_fs, 0, NULL, NULL );
+
+ counter_and_number_trackers = malloc( sizeof(struct freelist_test_counter_and_thread_number) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (counter_and_number_trackers+loop)->counter = (1000000 / cpu_count) * loop;
+ (counter_and_number_trackers+loop)->thread_number = (lfds600_atom_t) loop;
+ }
+
+ lfds600_freelist_query( fs, LFDS600_FREELIST_QUERY_VALIDATE, &vi, (void *) &dvs );
+
+ while( dvs == VALIDITY_VALID and lfds600_freelist_pop(fs, &fe) )
+ {
+ static int count = 0;
+
+ lfds600_freelist_get_user_data_from_element( fe, (void **) &cnt );
+
+ if( cnt->counter != (counter_and_number_trackers+cnt->thread_number)->counter++ )
+ dvs = VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ lfds600_freelist_push( cleanup_fs, fe );
+
+ count++;
+ }
+
+ // TRD : clean up
+ free( counter_and_number_trackers );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ lfds600_freelist_delete( (ftps+loop)->source_fs, NULL, NULL );
+
+ free( ftps );
+
+ lfds600_freelist_delete( cleanup_fs, freelist_test_internal_pushing_delete, NULL );
+ lfds600_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int freelist_test_internal_pushing_init( void **user_data, void *user_state )
+{
+ struct freelist_test_counter_and_thread_number
+ *ftcatn;
+
+ static lfds600_atom_t
+ counter = 0;
+
+ assert( user_data != NULL );
+ // TRD : user_state is being used as an integer type
+
+ *user_data = malloc( sizeof(struct freelist_test_counter_and_thread_number) );
+
+ ftcatn = (struct freelist_test_counter_and_thread_number *) *user_data;
+
+ ftcatn->counter = counter++;
+ ftcatn->thread_number = (lfds600_atom_t) user_state;
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void freelist_test_internal_pushing_delete( void *user_data, void *user_state )
+{
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ free( user_data );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state )
+{
+ struct freelist_test_pushing_state
+ *ftps;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ assert( freelist_test_pushing_state != NULL );
+
+ ftps = (struct freelist_test_pushing_state *) freelist_test_pushing_state;
+
+ while( lfds600_freelist_pop(ftps->source_fs, &fe) )
+ lfds600_freelist_push( ftps->fs, fe );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_popping_and_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum data_structure_validity
+ dvs;
+
+ struct lfds600_freelist_state
+ *fs;
+
+ struct freelist_test_popping_and_pushing_state
+ *pps;
+
+ struct lfds600_validation_info
+ vi;
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ one group of threads pops 100,000 elements into a local freelist, then pushes them all back
+ the other group pushes its 100,000 local elements onto the main freelist, then pops 1,000 back
+ all pushes and pops go onto the single main freelist
+
+ after time is up, all threads push what they have remaining onto
+ the main freelist
+
+ we then validate the main freelist
+ */
+
+ internal_display_test_name( "Popping and pushing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_freelist_new( &fs, 100000 * cpu_count, NULL, NULL );
+
+ pps = malloc( sizeof(struct freelist_test_popping_and_pushing_state) * cpu_count * 2 );
+
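+ // TRD : the first cpu_count states are for the popping threads (empty local
+ //       freelists); the second cpu_count states are for the pushing threads
+ //       (local freelists pre-filled with 100,000 elements)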
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (pps+loop)->fs = fs;
+ lfds600_freelist_new( &(pps+loop)->local_fs, 0, NULL, NULL );
+
+ (pps+loop+cpu_count)->fs = fs;
+ lfds600_freelist_new( &(pps+loop+cpu_count)->local_fs, 100000, NULL, NULL );
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping_and_pushing_start_popping, pps+loop );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, freelist_test_internal_thread_popping_and_pushing_start_pushing, pps+loop+cpu_count );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ lfds600_freelist_delete( (pps+loop)->local_fs, NULL, NULL );
+
+ free( pps );
+
+ vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;
+
+ lfds600_freelist_query( fs, LFDS600_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ lfds600_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state )
+{
+ struct freelist_test_popping_and_pushing_state
+ *pps;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ unsigned int
+ count;
+
+ assert( freelist_test_popping_and_pushing_state != NULL );
+
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ count = 0;
+
+ while( count < 100000 )
+ {
+ lfds600_freelist_pop( pps->fs, &fe );
+
+ if( fe != NULL )
+ {
+ lfds600_freelist_push( pps->local_fs, fe );
+ count++;
+ }
+ }
+
+ while( lfds600_freelist_pop(pps->local_fs, &fe) )
+ lfds600_freelist_push( pps->fs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state )
+{
+ struct freelist_test_popping_and_pushing_state
+ *pps;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ unsigned int
+ count;
+
+ assert( freelist_test_popping_and_pushing_state != NULL );
+
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ while( lfds600_freelist_pop(pps->local_fs, &fe) )
+ lfds600_freelist_push( pps->fs, fe );
+
+ count = 0;
+
+ while( count < 1000 )
+ {
+ lfds600_freelist_pop( pps->fs, &fe );
+
+ if( fe != NULL )
+ {
+ lfds600_freelist_push( pps->local_fs, fe );
+ count++;
+ }
+ }
+ }
+
+ // TRD : now push whatever we have in our local freelist
+ while( lfds600_freelist_pop(pps->local_fs, &fe) )
+ lfds600_freelist_push( pps->fs, fe );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_rapid_popping_and_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_freelist_state
+ *fs;
+
+ struct lfds600_validation_info
+ vi;
+
+ enum data_structure_validity
+ dvs;
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory cleanup we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/cleanup that
+ they can tell the freelist is valid and don't leak memory,
+ and this test is one which does minimal checking - in fact,
+ the nature of the test is that no real checking is possible -
+ but which runs very quickly
+
+ what we do is create a small freelist and then run one thread
+ per CPU, where each thread simply pops and then immediately
+ pushes
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the freelist, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ internal_display_test_name( "Rapid popping and pushing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_freelist_new( &fs, cpu_count, NULL, NULL );
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_rapid_popping_and_pushing, fs );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ vi.min_elements = cpu_count;
+ vi.max_elements = cpu_count;
+
+ lfds600_freelist_query( fs, LFDS600_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ lfds600_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds600_freelist_state )
+{
+ struct lfds600_freelist_state
+ *fs;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( lfds600_freelist_state != NULL );
+
+ fs = (struct lfds600_freelist_state *) lfds600_freelist_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_freelist_pop( fs, &fe );
+ lfds600_freelist_push( fs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds600_queue( void )
+{
+ printf( "\n"
+ "Queue Tests\n"
+ "===========\n" );
+
+ queue_test_enqueuing();
+ queue_test_dequeuing();
+ queue_test_enqueuing_and_dequeuing();
+ queue_test_rapid_enqueuing_and_dequeuing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_enqueuing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_queue_state
+ *qs;
+
+ struct queue_test_enqueuing_state
+ *qtes;
+
+ lfds600_atom_t
+ user_data,
+ thread,
+ count,
+ *per_thread_counters;
+
+ struct lfds600_validation_info
+ vi = { 1000000, 1000000 };
+
+ enum data_structure_validity
+ dvs[2];
+
+ /* TRD : create an empty queue with 1,000,000 elements in its freelist
+ then run one thread per CPU
+ where each thread busy-works, enqueuing elements (until there are no more elements)
+ each element's void pointer of user data is (thread number | element number)
+ where element_number is a thread-local counter starting at 0
+ where the thread_number occupies the top byte
+
+ when we're done, we check that all the elements are present
+ and increment on a per-thread basis
+ */
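+
+ /* TRD : for example, with an 8 byte lfds600_atom_t, thread 2 enqueues the
+ user data values 0x0200000000000000, 0x0200000000000001, and so on -
+ the top byte is the thread number, the remaining bytes are the
+ per-thread element counter
+ */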
+
+ internal_display_test_name( "Enqueuing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_queue_new( &qs, 1000000 );
+
+ qtes = malloc( sizeof(struct queue_test_enqueuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qtes+loop)->qs = qs;
+ (qtes+loop)->counter = (lfds600_atom_t) loop << (sizeof(lfds600_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_enqueuer, qtes+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ free( qtes );
+
+ /* TRD : first, validate the queue
+
+ then dequeue
+ we expect to find element numbers increment on a per thread basis
+ */
+
+ lfds600_queue_query( qs, LFDS600_QUEUE_QUERY_VALIDATE, &vi, dvs );
+
+ per_thread_counters = malloc( sizeof(lfds600_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( dvs[0] == VALIDITY_VALID and dvs[1] == VALIDITY_VALID and lfds600_queue_dequeue(qs, (void *) &user_data) )
+ {
+ thread = user_data >> (sizeof(lfds600_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( count < per_thread_counters[thread] )
+ dvs[0] = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count > per_thread_counters[thread] )
+ dvs[0] = VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( count == per_thread_counters[thread] )
+ per_thread_counters[thread]++;
+ }
+
+ free( per_thread_counters );
+
+ lfds600_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state )
+{
+ struct queue_test_enqueuing_state
+ *qtes;
+
+ assert( queue_test_enqueuing_state != NULL );
+
+ qtes = (struct queue_test_enqueuing_state *) queue_test_enqueuing_state;
+
+ // TRD : top byte of counter is already our thread number
+ while( lfds600_queue_enqueue(qtes->qs, (void *) qtes->counter++) );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_dequeuing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_queue_state
+ *qs;
+
+ struct queue_test_dequeuing_state
+ *qtds;
+
+ struct lfds600_validation_info
+ vi = { 0, 0 };
+
+ enum data_structure_validity
+ dvs[2];
+
+ /* TRD : create a queue with 1,000,000 elements
+
+ use a single thread to enqueue every element
+ each element's user data is an incrementing counter
+
+ then run one thread per CPU
+ where each thread busy-works, dequeuing
+
+ when an element is dequeued, we check (on a per-thread basis) that the
+ value dequeued is greater than the value previously dequeued
+ */
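+
+ /* TRD : the per-thread check is valid because the queue is FIFO and the
+ elements were enqueued by a single thread in ascending order, so
+ whatever subset of elements any one dequeuing thread receives, it
+ must receive them in ascending order
+ */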
+
+ internal_display_test_name( "Dequeuing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_queue_new( &qs, 1000000 );
+
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ lfds600_queue_enqueue( qs, (void *) (lfds600_atom_t) loop );
+
+ qtds = malloc( sizeof(struct queue_test_dequeuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qtds+loop)->qs = qs;
+ (qtds+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_dequeuer, qtds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : check queue is empty
+ lfds600_queue_query( qs, LFDS600_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (qtds+loop)->error_flag == RAISED )
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+
+ free( qtds );
+
+ lfds600_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state )
+{
+ struct queue_test_dequeuing_state
+ *qtds;
+
+ lfds600_atom_t
+ *prev_user_data,
+ *user_data;
+
+ assert( queue_test_dequeuing_state != NULL );
+
+ qtds = (struct queue_test_dequeuing_state *) queue_test_dequeuing_state;
+
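+ // TRD : dequeue once up front to obtain an initial value to compare against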
+ lfds600_queue_dequeue( qtds->qs, (void *) &prev_user_data );
+
+ while( lfds600_queue_dequeue(qtds->qs, (void *) &user_data) )
+ {
+ if( user_data <= prev_user_data )
+ qtds->error_flag = RAISED;
+
+ prev_user_data = user_data;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_enqueuing_and_dequeuing( void )
+{
+ unsigned int
+ loop,
+ subloop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_queue_state
+ *qs;
+
+ struct queue_test_enqueuing_and_dequeuing_state
+ *qteds;
+
+ struct lfds600_validation_info
+ vi = { 0, 0 };
+
+ enum data_structure_validity
+ dvs[2];
+
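+ /* TRD : create an empty queue whose freelist holds one element per CPU
+
+ run one thread per CPU for ten seconds
+ each thread enqueues an element (thread number in the top byte of
+ the user data, per-thread counter below it) and then immediately
+ dequeues an element
+
+ each thread tracks, per originating thread, the highest counter it
+ has seen and raises its error flag if a counter repeats or
+ decreases, or if an unknown thread number appears
+
+ afterwards we validate the queue, which should be empty
+ */
+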
+ internal_display_test_name( "Enqueuing and dequeuing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_queue_new( &qs, cpu_count );
+
+ qteds = malloc( sizeof(struct queue_test_enqueuing_and_dequeuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qteds+loop)->qs = qs;
+ (qteds+loop)->thread_number = loop;
+ (qteds+loop)->counter = (lfds600_atom_t) loop << (sizeof(lfds600_atom_t)*8-8);
+ (qteds+loop)->cpu_count = cpu_count;
+ (qteds+loop)->error_flag = LOWERED;
+ (qteds+loop)->per_thread_counters = malloc( sizeof(lfds600_atom_t) * cpu_count );
+
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )
+ *((qteds+loop)->per_thread_counters+subloop) = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_enqueuer_and_dequeuer, qteds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds600_queue_query( qs, LFDS600_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (qteds+loop)->error_flag == RAISED )
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ free( (qteds+loop)->per_thread_counters );
+
+ free( qteds );
+
+ lfds600_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_enqueuing_and_dequeuing_state )
+{
+ struct queue_test_enqueuing_and_dequeuing_state
+ *qteds;
+
+ time_t
+ start_time;
+
+ lfds600_atom_t
+ thread,
+ count,
+ user_data;
+
+ assert( queue_test_enqueuing_and_dequeuing_state != NULL );
+
+ qteds = (struct queue_test_enqueuing_and_dequeuing_state *) queue_test_enqueuing_and_dequeuing_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_queue_enqueue( qteds->qs, (void *) (qteds->counter++) );
+ lfds600_queue_dequeue( qteds->qs, (void *) &user_data );
+
+ thread = user_data >> (sizeof(lfds600_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= qteds->cpu_count )
+ qteds->error_flag = RAISED;
+ else
+ {
+ if( count < qteds->per_thread_counters[thread] )
+ qteds->error_flag = RAISED;
+
+ if( count >= qteds->per_thread_counters[thread] )
+ qteds->per_thread_counters[thread] = count+1;
+ }
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_rapid_enqueuing_and_dequeuing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_queue_state
+ *qs;
+
+ struct queue_test_rapid_enqueuing_and_dequeuing_state
+ *qtreds;
+
+ struct lfds600_validation_info
+ vi = { 50000, 50000 };
+
+ lfds600_atom_t
+ user_data,
+ thread,
+ count,
+ *per_thread_counters;
+
+ enum data_structure_validity
+ dvs[2];
+
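+ /* TRD : create a queue with 100,000 elements in its freelist and enqueue
+ 50,000 elements
+
+ run one thread per CPU for ten seconds, where each thread enqueues
+ an element and then immediately dequeues one
+
+ afterwards we validate the queue (it should still hold 50,000
+ elements) and then dequeue everything, checking that per-thread
+ counters always increase; each per-thread counter is seeded from
+ the first value seen, since earlier values will already have been
+ dequeued during the test
+ */
+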
+ internal_display_test_name( "Rapid enqueuing and dequeuing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_queue_new( &qs, 100000 );
+
+ for( loop = 0 ; loop < 50000 ; loop++ )
+ lfds600_queue_enqueue( qs, NULL );
+
+ qtreds = malloc( sizeof(struct queue_test_rapid_enqueuing_and_dequeuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qtreds+loop)->qs = qs;
+ (qtreds+loop)->counter = (lfds600_atom_t) loop << (sizeof(lfds600_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_rapid_enqueuer_and_dequeuer, qtreds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds600_queue_query( qs, LFDS600_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : now check results
+ per_thread_counters = malloc( sizeof(lfds600_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( dvs[0] == VALIDITY_VALID and dvs[1] == VALIDITY_VALID and lfds600_queue_dequeue(qs, (void *) &user_data) )
+ {
+ thread = user_data >> (sizeof(lfds600_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( per_thread_counters[thread] == 0 )
+ per_thread_counters[thread] = count;
+
+ if( count < per_thread_counters[thread] )
+ dvs[0] = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count >= per_thread_counters[thread] )
+ per_thread_counters[thread] = count+1;
+ }
+
+ free( per_thread_counters );
+
+ free( qtreds );
+
+ lfds600_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state )
+{
+ struct queue_test_rapid_enqueuing_and_dequeuing_state
+ *qtreds;
+
+ time_t
+ start_time;
+
+ lfds600_atom_t
+ user_data;
+
+ assert( queue_test_rapid_enqueuing_and_dequeuing_state != NULL );
+
+ qtreds = (struct queue_test_rapid_enqueuing_and_dequeuing_state *) queue_test_rapid_enqueuing_and_dequeuing_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_queue_enqueue( qtreds->qs, (void *) (qtreds->counter++) );
+ lfds600_queue_dequeue( qtreds->qs, (void *) &user_data );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds600_ringbuffer( void )
+{
+ printf( "\n"
+ "Ringbuffer Tests\n"
+ "================\n" );
+
+ ringbuffer_test_reading();
+ ringbuffer_test_writing();
+ ringbuffer_test_reading_and_writing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void ringbuffer_test_reading( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ struct ringbuffer_test_reading_state
+ *rtrs;
+
+ struct lfds600_validation_info
+ vi = { 0, 0 };
+
+ enum data_structure_validity
+ dvs[3];
+
+ lfds600_atom_t
+ total_read = 0;
+
+ /* TRD : we create a single ringbuffer
+ with 1,000,000 elements
+ we populate the ringbuffer, where the
+ user data is an incrementing counter
+
+ we create one thread per CPU
+ where each thread busy-works,
+ reading until the ringbuffer is empty
+
+ each thread keeps track of the number of reads it performs
+ and checks that each user data value it reads is greater than
+ the previous user data value read
+ */
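+
+ /* TRD : the ringbuffer is composed of a queue and a freelist, which is why
+ validation below reports three results - the queue, the queue's own
+ freelist and the ringbuffer's freelist
+ */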
+
+ internal_display_test_name( "Reading" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_ringbuffer_new( &rs, 1000000, NULL, NULL );
+
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ {
+ lfds600_ringbuffer_get_write_element( rs, &fe, NULL );
+ lfds600_freelist_set_user_data_in_element( fe, (void *) (lfds600_atom_t) loop );
+ lfds600_ringbuffer_put_write_element( rs, fe );
+ }
+
+ rtrs = malloc( sizeof(struct ringbuffer_test_reading_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rtrs+loop)->rs = rs;
+ (rtrs+loop)->read_count = 0;
+ (rtrs+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_reader, rtrs+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds600_ringbuffer_query( rs, LFDS600_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (rtrs+loop)->error_flag == RAISED )
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : check thread reads total to 1,000,000
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ total_read += (rtrs+loop)->read_count;
+
+ if( total_read < 1000000 )
+ dvs[0] = VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( total_read > 1000000 )
+ dvs[0] = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ free( rtrs );
+
+ lfds600_ringbuffer_delete( rs, NULL, NULL );
+
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state )
+{
+ struct ringbuffer_test_reading_state
+ *rtrs;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ lfds600_atom_t
+ *prev_user_data,
+ *user_data;
+
+ assert( ringbuffer_test_reading_state != NULL );
+
+ rtrs = (struct ringbuffer_test_reading_state *) ringbuffer_test_reading_state;
+
+ lfds600_ringbuffer_get_read_element( rtrs->rs, &fe );
+ lfds600_freelist_get_user_data_from_element( fe, (void **) &prev_user_data );
+ lfds600_ringbuffer_put_read_element( rtrs->rs, fe );
+
+ rtrs->read_count++;
+
+ while( lfds600_ringbuffer_get_read_element(rtrs->rs, &fe) )
+ {
+ lfds600_freelist_get_user_data_from_element( fe, (void **) &user_data );
+ lfds600_ringbuffer_put_read_element( rtrs->rs, fe );
+
+ if( user_data <= prev_user_data )
+ rtrs->error_flag = RAISED;
+
+ prev_user_data = user_data;
+
+ rtrs->read_count++;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void ringbuffer_test_writing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ struct ringbuffer_test_writing_state
+ *rtws;
+
+ struct lfds600_validation_info
+ vi = { 100000, 100000 };
+
+ enum data_structure_validity
+ dvs[3];
+
+ lfds600_atom_t
+ thread,
+ count,
+ user_data,
+ *per_thread_counters;
+
+ /* TRD : we create a single ringbuffer
+ with 100000 elements
+ the ringbuffer starts empty
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ for ten seconds
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ after the threads are complete, we validate by
+ checking the user data counters increment on a per thread
+ basis
+ */
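+
+ /* TRD : since each thread writes for ten seconds into a 100,000 element
+ ringbuffer, older unread elements are reused (overwritten); the
+ verification loop below therefore seeds each per-thread counter
+ from the first value it sees for that thread and only then requires
+ the counters to increment
+ */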
+
+ internal_display_test_name( "Writing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_ringbuffer_new( &rs, 100000, NULL, NULL );
+
+ rtws = malloc( sizeof(struct ringbuffer_test_writing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rtws+loop)->rs = rs;
+ (rtws+loop)->write_count = (lfds600_atom_t) loop << (sizeof(lfds600_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_writer, rtws+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now check results
+ per_thread_counters = malloc( sizeof(lfds600_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ lfds600_ringbuffer_query( rs, LFDS600_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ while( dvs[0] == VALIDITY_VALID and dvs[1] == VALIDITY_VALID and dvs[2] == VALIDITY_VALID and lfds600_ringbuffer_get_read_element(rs, &fe) )
+ {
+ lfds600_freelist_get_user_data_from_element( fe, (void *) &user_data );
+
+ thread = user_data >> (sizeof(lfds600_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+ lfds600_ringbuffer_put_read_element( rs, fe );
+ break;
+ }
+
+ if( per_thread_counters[thread] == 0 )
+ per_thread_counters[thread] = count;
+
+ if( count < per_thread_counters[thread] )
+ dvs[0] = VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count >= per_thread_counters[thread] )
+ per_thread_counters[thread] = count+1;
+
+ lfds600_ringbuffer_put_read_element( rs, fe );
+ }
+
+ free( per_thread_counters );
+
+ free( rtws );
+
+ lfds600_ringbuffer_delete( rs, NULL, NULL );
+
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state )
+{
+ struct ringbuffer_test_writing_state
+ *rtws;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( ringbuffer_test_writing_state != NULL );
+
+ rtws = (struct ringbuffer_test_writing_state *) ringbuffer_test_writing_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_ringbuffer_get_write_element( rtws->rs, &fe, NULL );
+ lfds600_freelist_set_user_data_in_element( fe, (void *) (lfds600_atom_t) (rtws->write_count++) );
+ lfds600_ringbuffer_put_write_element( rtws->rs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void ringbuffer_test_reading_and_writing( void )
+{
+ unsigned int
+ loop,
+ subloop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_ringbuffer_state
+ *rs;
+
+ struct ringbuffer_test_reading_and_writing_state
+ *rtrws;
+
+ struct lfds600_validation_info
+ vi = { 0, 0 };
+
+ enum data_structure_validity
+ dvs[3];
+
+ /* TRD : we create a single ringbuffer
+ with 100000 elements
+ the ringbuffer starts empty
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ and then immediately reading
+ for ten seconds
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ while a thread runs, it keeps track of the
+ counters for the other threads and throws an error
+ if it sees the number stay the same or decrease
+ */
+
+ internal_display_test_name( "Reading and writing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds600_ringbuffer_new( &rs, 100000, NULL, NULL );
+
+ rtrws = malloc( sizeof(struct ringbuffer_test_reading_and_writing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rtrws+loop)->rs = rs;
+ (rtrws+loop)->counter = (lfds600_atom_t) loop << (sizeof(lfds600_atom_t)*8-8);
+ (rtrws+loop)->cpu_count = cpu_count;
+ (rtrws+loop)->error_flag = LOWERED;
+ (rtrws+loop)->per_thread_counters = malloc( sizeof(lfds600_atom_t) * cpu_count );
+
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )
+ *((rtrws+loop)->per_thread_counters+subloop) = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_reader_writer, rtrws+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds600_ringbuffer_query( rs, LFDS600_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (rtrws+loop)->error_flag == RAISED )
+ dvs[0] = VALIDITY_INVALID_TEST_DATA;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ free( (rtrws+loop)->per_thread_counters );
+
+ free( rtrws );
+
+ lfds600_ringbuffer_delete( rs, NULL, NULL );
+
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state )
+{
+ struct ringbuffer_test_reading_and_writing_state
+ *rtrws;
+
+ struct lfds600_freelist_element
+ *fe;
+
+ lfds600_atom_t
+ user_data,
+ thread,
+ count;
+
+ time_t
+ start_time;
+
+ assert( ringbuffer_test_reading_and_writing_state != NULL );
+
+ rtrws = (struct ringbuffer_test_reading_and_writing_state *) ringbuffer_test_reading_and_writing_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds600_ringbuffer_get_write_element( rtrws->rs, &fe, NULL );
+ lfds600_freelist_set_user_data_in_element( fe, (void *) (lfds600_atom_t) (rtrws->counter++) );
+ lfds600_ringbuffer_put_write_element( rtrws->rs, fe );
+
+ lfds600_ringbuffer_get_read_element( rtrws->rs, &fe );
+ lfds600_freelist_get_user_data_from_element( fe, (void *) &user_data );
+
+ thread = user_data >> (sizeof(lfds600_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= rtrws->cpu_count )
+ rtrws->error_flag = RAISED;
+ else
+ {
+ if( count < rtrws->per_thread_counters[thread] )
+ rtrws->error_flag = RAISED;
+
+ if( count >= rtrws->per_thread_counters[thread] )
+ rtrws->per_thread_counters[thread] = count+1;
+ }
+
+ lfds600_ringbuffer_put_read_element( rtrws->rs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds600_slist( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds600_slist_thread_start_state
+ stss;
+
+ /* TRD : 1. one head writer per CPU
+ 2. make one element, then one after writer per CPU
+ 3. make a list, then one list traverser per CPU
+ 4. one head writer and one list traverser per CPU
+ 5. make one element, then one after writer and one list traverser per CPU
+ 6. make a list, then one 100% deleter-traverser per CPU
+ 7. make a list, then one 25% deleter-traverser per CPU
+ 8. one head writer and one 100% deleter-traverser per CPU
+ 9. one head writer and one 25% deleter-traverser per CPU
+ 10. make one element, then one after writer and one 100% deleter-traverser per CPU
+ 11. make one element, then one after writer and one 25% deleter-traverser per CPU
+ 12. one head writer, one after writer, one traverser and one 25% deleter-traverser per CPU
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ printf( "\n"
+ "SList Test\n"
+ "==========\n" );
+
+ // TRD : 1. one head writer per CPU
+
+ printf( "\n"
+ "1. one head writer per CPU\n"
+ "==========================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = NULL;
+ stss.duration = 1;
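+
+ /* TRD : in each scenario below, duration is how long (in seconds) each
+ thread runs, se is the seed element used by the "after writer"
+ threads, and iteration_modulo controls the delete rate of the
+ deleter-traverser threads (1 is used for the 100% tests, 4 for
+ the 25% tests)
+ */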
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_head_writer, &stss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 2. make one element, then one after writer per CPU
+
+ printf( "\n"
+ "2. make one element, then one after writer per CPU\n"
+ "==================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = lfds600_slist_new_head( stss.ss, (void *) NULL );
+ stss.duration = 1;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_after_writer, &stss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 3. make a list, then one list traverser per CPU
+
+ printf( "\n"
+ "3. make a list, then one list traverser per CPU\n"
+ "===============================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = NULL;
+ stss.duration = 10;
+
+ // TRD : small list so we get collisions
+ for( loop = 0 ; loop < 10 ; loop++ )
+ lfds600_slist_new_head( stss.ss, (void *) 0 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_traverser, &stss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 4. one head writer and one list traverser per CPU
+
+ printf( "\n"
+ "4. one head writer and one list traverser per CPU\n"
+ "=================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = NULL;
+ stss.duration = 1;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_head_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 5. make one element, then one after writer and one list traverser per CPU
+
+ printf( "\n"
+ "5. make one element, then one after writer and one list traverser per CPU\n"
+ "=========================================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = lfds600_slist_new_head( stss.ss, (void *) NULL );
+ stss.duration = 1;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_after_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 6. make a list, then one 100% deleter-traverser per CPU
+
+ printf( "\n"
+ "6. make a list, then one 100%% deleter-traverser per CPU\n"
+ "=======================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = NULL;
+ stss.duration = 1;
+
+ for( loop = 0 ; loop < 10000 ; loop++ )
+ lfds600_slist_new_head( stss.ss, (void *) 0 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 7. make a list, then one 25% deleter-traverser per CPU
+
+ printf( "\n"
+ "7. make a list, then one 25%% deleter-traverser per CPU\n"
+ "======================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 4;
+ stss.se = NULL;
+ stss.duration = 1;
+
+ for( loop = 0 ; loop < 10000 ; loop++ )
+ lfds600_slist_new_head( stss.ss, (void *) 0 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 8. one head writer and one 100% deleter-traverser per CPU
+
+ printf( "\n"
+ "8. one head writer and one 100%% deleter-traverse per CPU\n"
+ "========================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = NULL;
+ stss.duration = 10;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_head_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 9. one head writer and one 25% deleter-traverser per CPU
+
+ printf( "\n"
+ "9. one head writer and one 25%% deleter-traverse per CPU\n"
+ "=======================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 4;
+ stss.se = NULL;
+ stss.duration = 1;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_head_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 10. make one element, then one after writer and one 100% deleter-traverser per CPU
+
+ printf( "\n"
+ "10. make one element, then one after writer and one 100%% deleter-traverser per CPU\n"
+ "==================================================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 1;
+ stss.se = lfds600_slist_new_head( stss.ss, (void *) NULL );
+ stss.duration = 10;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_after_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 11. make one element, then one after writer and one 25% deleter-traverser per CPU
+
+ printf( "\n"
+ "11. make one element, then one after writer and one 25%% deleter-traverser per CPU\n"
+ "=================================================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 4;
+ stss.se = lfds600_slist_new_head( stss.ss, (void *) NULL );
+ stss.duration = 1;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_after_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ // TRD : 12. one head writer, one after writer, one traverser and one 25% deleter-traverser per CPU
+
+ printf( "\n"
+ "12. one head writer, one after writer, one traverser and one 25%% deleter-traverser per CPU\n"
+ "==========================================================================================\n" );
+
+ lfds600_slist_new( &stss.ss, NULL, NULL );
+ stss.iteration_modulo = 4;
+ stss.se = lfds600_slist_new_head( stss.ss, (void *) NULL );
+ stss.duration = 1;
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 4 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_slist_internal_thread_head_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_slist_internal_thread_after_writer, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count*2], loop, lfds600_slist_internal_thread_traverser, &stss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count*3], loop, lfds600_slist_internal_thread_deleter_traverser, &stss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 4 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_slist_delete( stss.ss );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_head_writer( void *lfds600_slist_thread_start_state )
+{
+ struct lfds600_slist_thread_start_state
+ *stss;
+
+ time_t
+ start_time;
+
+ unsigned long int
+ count = 0;
+
+ assert( lfds600_slist_thread_start_state != NULL );
+
+ stss = (struct lfds600_slist_thread_start_state *) lfds600_slist_thread_start_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + stss->duration )
+ if( lfds600_slist_new_head(stss->ss, (void *) 0) )
+ count++;
+
+ printf( "head writer count = %lu\n", count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_after_writer( void *lfds600_slist_thread_start_state )
+{
+ struct lfds600_slist_thread_start_state
+ *stss;
+
+ time_t
+ start_time;
+
+ unsigned long int
+ count = 0;
+
+ assert( lfds600_slist_thread_start_state != NULL );
+
+ stss = (struct lfds600_slist_thread_start_state *) lfds600_slist_thread_start_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + stss->duration )
+ if( lfds600_slist_new_next(stss->se, (void *) 0) )
+ count++;
+
+ printf( "after writer count = %lu\n", count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_traverser( void *lfds600_slist_thread_start_state )
+{
+ struct lfds600_slist_thread_start_state
+ *stss;
+
+ time_t
+ start_time;
+
+ unsigned long int
+ count = 0,
+ iteration = 0;
+
+ struct lfds600_slist_element
+ *se;
+
+ assert( lfds600_slist_thread_start_state != NULL );
+
+ stss = (struct lfds600_slist_thread_start_state *) lfds600_slist_thread_start_state;
+
+ time( &start_time );
+
+ lfds600_slist_get_head( stss->ss, &se );
+
+ while( time(NULL) < start_time + stss->duration )
+ {
+ if( !(iteration % stss->iteration_modulo) )
+ {
+ lfds600_slist_get_next( se, &se );
+ count++;
+ }
+
+ if( se == NULL )
+ {
+ lfds600_slist_get_head( stss->ss, &se );
+ count++;
+ }
+
+ iteration++;
+ }
+
+ printf( "traverser count = %lu\n", count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION lfds600_slist_internal_thread_deleter_traverser( void *lfds600_slist_thread_start_state )
+{
+ struct lfds600_slist_thread_start_state
+ *stss;
+
+ time_t
+ start_time;
+
+ unsigned long int
+ count = 0,
+ iteration = 0;
+
+ struct lfds600_slist_element
+ *se;
+
+ assert( lfds600_slist_thread_start_state != NULL );
+
+ stss = (struct lfds600_slist_thread_start_state *) lfds600_slist_thread_start_state;
+
+ time( &start_time );
+
+ lfds600_slist_get_head( stss->ss, &se );
+
+ while( time(NULL) < start_time + stss->duration )
+ {
+    if( se != NULL && !(iteration % stss->iteration_modulo) )
+ {
+ lfds600_slist_delete_element( stss->ss, se );
+ count++;
+ }
+
+ if( se != NULL )
+ lfds600_slist_get_next( se, &se );
+
+ if( se == NULL )
+ lfds600_slist_get_head( stss->ss, &se );
+
+ iteration++;
+ }
+
+ printf( "deleter-traverser count = %lu\n", count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds600_stack( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ struct lfds600_stack_state
+ *ss;
+
+ thread_state_t
+ *thread_handles;
+
+ /* TRD : there are 5 tests
+
+ 1. single reader thread per CPU
+ - stack always empty
+ 2. single writer thread per CPU
+ - stack always full
+ 3. one reader and one writer thread per CPU
+ - stack balanced
+ 4. one reader and two writer threads per CPU
+ - stack grows
+ 5. two reader and one writer thread per CPU
+ - stack tends to empty
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ printf( "\n"
+ "Stack Test\n"
+ "==========\n" );
+
+ // TRD : 1. single reader thread per CPU
+
+ printf( "\n"
+ "1. single reader thread per CPU\n"
+ "===============================\n" );
+
+ lfds600_stack_new( &ss, 10000 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_stack_internal_thread_reader, ss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_stack_delete( ss, NULL, NULL );
+
+ free( thread_handles );
+
+ // TRD : 2. single writer thread per CPU
+
+ printf( "\n"
+ "2. single writer thread per CPU\n"
+ "===============================\n" );
+
+ lfds600_stack_new( &ss, 10000 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_stack_internal_thread_writer, ss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_stack_delete( ss, NULL, NULL );
+
+ free( thread_handles );
+
+ // TRD : 3. one reader and one writer thread per CPU
+
+ printf( "\n"
+ "3. one reader and one writer thread per CPU\n"
+ "===========================================\n" );
+
+ lfds600_stack_new( &ss, 10000 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_stack_internal_thread_reader, ss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_stack_internal_thread_writer, ss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_stack_delete( ss, NULL, NULL );
+
+ free( thread_handles );
+
+ // TRD : 4. one reader and two writer threads per CPU
+
+ printf( "\n"
+ "4. one reader and two writer threads per CPU\n"
+ "============================================\n" );
+
+ lfds600_stack_new( &ss, 10000 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 3 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_stack_internal_thread_reader, ss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_stack_internal_thread_writer, ss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count*2], loop, lfds600_stack_internal_thread_writer, ss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 3 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_stack_delete( ss, NULL, NULL );
+
+ free( thread_handles );
+
+ // TRD : 5. two reader and one writer thread per CPU
+
+ printf( "\n"
+ "5. two reader and one writer thread per CPU\n"
+ "===========================================\n" );
+
+ lfds600_stack_new( &ss, 10000 );
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 3 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, lfds600_stack_internal_thread_reader, ss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds600_stack_internal_thread_reader, ss );
+ abstraction_thread_start( &thread_handles[loop+cpu_count*2], loop, lfds600_stack_internal_thread_writer, ss );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 3 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ lfds600_stack_delete( ss, NULL, NULL );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION lfds600_stack_internal_thread_reader( void *lfds600_stack_state )
+{
+ struct lfds600_stack_state
+ *ss;
+
+ void
+ *user_data;
+
+ time_t
+ start_time;
+
+ unsigned long long int
+ count = 0;
+
+ assert( lfds600_stack_state != NULL );
+
+ ss = (struct lfds600_stack_state *) lfds600_stack_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ if( lfds600_stack_pop(ss, &user_data) )
+ count++;
+ }
+
+ printf( "read count = %llu\n", count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION lfds600_stack_internal_thread_writer( void *lfds600_stack_state )
+{
+ struct lfds600_stack_state
+ *ss;
+
+ time_t
+ start_time;
+
+ unsigned long long int
+ count = 0;
+
+ assert( lfds600_stack_state != NULL );
+
+ ss = (struct lfds600_stack_state *) lfds600_stack_state;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ // TRD : we don't store any user data
+ if( lfds600_stack_push(ss, NULL) )
+ count++;
+ }
+
+ printf( "write count = %llu\n", count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcproj", "{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05} = {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds600", "..\liblfds600\liblfds600.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Debug|Win32 = Debug|Win32\r
+ Debug|x64 = Debug|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ Release|Win32 = Release|Win32\r
+ Release|x64 = Release|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.Build.0 = Release|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="test"\r
+ ProjectGUID="{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ RootNamespace="test"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds600\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="4"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds600\inc""\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds600\inc""\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds600\inc""\r
+ ExceptionHandling="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="src"\r
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"\r
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"\r
+ >\r
+ <File\r
+ RelativePath=".\src\abstraction.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_cpu_count.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_start.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_wait.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_stack.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\main.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\misc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\structures.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_abstraction.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_slist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_stack.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+building liblfds\r
+================\r
+\r
+Windows (user-mode)\r
+===================\r
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition\r
+ to load "liblfds.sln".\r
+\r
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously\r
+ you'll need to have run the appropriate vcvars*.bat first; you can build\r
+   for IA64, 64-bit and 32-bit - just run the correct vcvars batch file).\r
+\r
+ Targets are "librel", "libdbg", "dllrel", "dlldbg" and "clean". You need\r
+ to clean between switching targets.\r
+\r
+Windows (kernel)\r
+================\r
+Use the Windows Driver Kit "build" command. Prior to running "build",\r
+if you wish to build a static library, run the batch file\r
+"runme_before_win_kernel_static_lib_build.bat"; if you wish to\r
+build a dynamic library, instead run "runme_before_win_kernel_dynamic_lib_build.bat".\r
+\r
+The Windows kernel build system is rather limited; rather than mess up\r
+the directory/file structure just for the Windows kernel platform, I've\r
+instead arranged things so that these batch files do the necessary work\r
+for "build" to succeed.\r
+\r
+The batch files are idempotent; you can run them as often as you\r
+like, in any order, at any time (before or after builds), and they'll\r
+do the right thing.\r
+\r
+Linux\r
+=====\r
+Use GNUmake to run "makefile.linux". Targets are "arrel", "ardbg",\r
+"sorel", "sodbg" and "clean". You need to clean between switching\r
+targets.\r
+\r
+\r
--- /dev/null
+DIRS = src
+
--- /dev/null
+#ifndef __LIBLFDS601_H
+
+ /***** library header *****/
+ #define LFDS601_RELEASE_NUMBER_STRING "6.0.1"
+
+
+
+
+ /***** lfds601_abstraction *****/
+
+ /***** defines *****/
+ #if (defined _WIN64 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ // TRD : 64-bit Windows user-mode with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <windows.h>
+ #include <intrin.h>
+ typedef unsigned __int64 lfds601_atom_t;
+ #define LFDS601_INLINE extern __forceinline
+ #define LFDS601_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 8
+ #define LFDS601_ALIGN_DOUBLE_POINTER 16
+ #endif
+
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ // TRD : 32-bit Windows user-mode with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <windows.h>
+ #include <intrin.h>
+ typedef unsigned long int lfds601_atom_t;
+ #define LFDS601_INLINE extern __forceinline
+ #define LFDS601_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 4
+ #define LFDS601_ALIGN_DOUBLE_POINTER 8
+
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)
+ #endif
+
+ #if (defined _WIN64 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ // TRD : 64-bit Windows kernel with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <wdm.h>
+ typedef unsigned __int64 lfds601_atom_t;
+ #define LFDS601_INLINE extern __forceinline
+ #define LFDS601_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 8
+ #define LFDS601_ALIGN_DOUBLE_POINTER 16
+ #endif
+
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ // TRD : 32-bit Windows kernel with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <wdm.h>
+ typedef unsigned long int lfds601_atom_t;
+ #define LFDS601_INLINE extern __forceinline
+ #define LFDS601_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 4
+ #define LFDS601_ALIGN_DOUBLE_POINTER 8
+
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)
+ #endif
+
+ #if (defined __unix__ && defined __x86_64__ && __GNUC__)
+ // TRD : any UNIX with GCC on x64
+ #define _XOPEN_SOURCE 600
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long long int lfds601_atom_t;
+ #define LFDS601_INLINE inline
+ #define LFDS601_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 8
+ #define LFDS601_ALIGN_DOUBLE_POINTER 16
+ #endif
+
+ #if (defined __unix__ && defined __i686__ && __GNUC__)
+ // TRD : any UNIX with GCC on x86
+ #define _XOPEN_SOURCE 600
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long int lfds601_atom_t;
+ #define LFDS601_INLINE inline
+ #define LFDS601_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 4
+ #define LFDS601_ALIGN_DOUBLE_POINTER 8
+ #endif
+
+ #if (defined __unix__ && defined __arm__ && __GNUC__)
+ // TRD : any UNIX with GCC on ARM
+ #define _XOPEN_SOURCE 600
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long int lfds601_atom_t;
+ #define LFDS601_INLINE inline
+ #define LFDS601_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS601_ALIGN_SINGLE_POINTER 4
+ #define LFDS601_ALIGN_DOUBLE_POINTER 8
+ #endif
+
+ /***** enums *****/
+ enum lfds601_data_structure_validity
+ {
+ LFDS601_VALIDITY_VALID,
+ LFDS601_VALIDITY_INVALID_LOOP,
+ LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS,
+ LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS,
+ LFDS601_VALIDITY_INVALID_TEST_DATA
+ };
+
+ /***** structs *****/
+ struct lfds601_validation_info
+ {
+ lfds601_atom_t
+ min_elements,
+ max_elements;
+ };
+
+ /***** public prototypes *****/
+ void lfds601_abstraction_aligned_free( void *memory );
+ void *lfds601_abstraction_aligned_malloc( size_t size, size_t align_in_bytes );
+ lfds601_atom_t lfds601_abstraction_cas( volatile lfds601_atom_t *destination, lfds601_atom_t exchange, lfds601_atom_t compare );
+ unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare );
+ lfds601_atom_t lfds601_abstraction_increment( lfds601_atom_t *value );
+
+
+
+
+
+ /***** lfds601_freelist *****/
+
+ /***** enums *****/
+ enum lfds601_freelist_query_type
+ {
+ LFDS601_FREELIST_QUERY_ELEMENT_COUNT,
+ LFDS601_FREELIST_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds601_freelist_state;
+ struct lfds601_freelist_element;
+
+ /***** public prototypes *****/
+ int lfds601_freelist_new( struct lfds601_freelist_state **fs, lfds601_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );
+ void lfds601_freelist_delete( struct lfds601_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ lfds601_atom_t lfds601_freelist_new_elements( struct lfds601_freelist_state *fs, lfds601_atom_t number_elements );
+
+ struct lfds601_freelist_element *lfds601_freelist_pop( struct lfds601_freelist_state *fs, struct lfds601_freelist_element **fe );
+ struct lfds601_freelist_element *lfds601_freelist_guaranteed_pop( struct lfds601_freelist_state *fs, struct lfds601_freelist_element **fe );
+ void lfds601_freelist_push( struct lfds601_freelist_state *fs, struct lfds601_freelist_element *fe );
+
+ void *lfds601_freelist_get_user_data_from_element( struct lfds601_freelist_element *fe, void **user_data );
+ void lfds601_freelist_set_user_data_in_element( struct lfds601_freelist_element *fe, void *user_data );
+
+ void lfds601_freelist_query( struct lfds601_freelist_state *fs, enum lfds601_freelist_query_type query_type, void *query_input, void *query_output );
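+
+  /* usage sketch : a minimal, single-threaded freelist round-trip, showing how the
+     prototypes above fit together; the element count of 16 and the NULL init/delete
+     callbacks are illustrative choices only, and error checking is omitted
+
+       struct lfds601_freelist_state
+         *fs;
+
+       struct lfds601_freelist_element
+         *fe;
+
+       void
+         *user_data;
+
+       lfds601_freelist_new( &fs, 16, NULL, NULL );
+
+       if( lfds601_freelist_pop(fs, &fe) != NULL )
+       {
+         lfds601_freelist_get_user_data_from_element( fe, &user_data );
+         lfds601_freelist_push( fs, fe );
+       }
+
+       lfds601_freelist_delete( fs, NULL, NULL );
+  */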
+
+
+
+
+
+ /***** lfds601_queue *****/
+
+ /***** enums *****/
+ enum lfds601_queue_query_type
+ {
+ LFDS601_QUEUE_QUERY_ELEMENT_COUNT,
+ LFDS601_QUEUE_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds601_queue_state;
+
+ /***** public prototypes *****/
+ int lfds601_queue_new( struct lfds601_queue_state **sq, lfds601_atom_t number_elements );
+ void lfds601_queue_delete( struct lfds601_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ int lfds601_queue_enqueue( struct lfds601_queue_state *qs, void *user_data );
+ int lfds601_queue_guaranteed_enqueue( struct lfds601_queue_state *qs, void *user_data );
+ int lfds601_queue_dequeue( struct lfds601_queue_state *qs, void **user_data );
+
+ void lfds601_queue_query( struct lfds601_queue_state *qs, enum lfds601_queue_query_type query_type, void *query_input, void *query_output );
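+
+  /* usage sketch : a minimal, single-threaded enqueue/dequeue pair; the element
+     count of 16 and the value (void *) 1 are illustrative only
+
+       struct lfds601_queue_state
+         *qs;
+
+       void
+         *user_data;
+
+       lfds601_queue_new( &qs, 16 );
+
+       lfds601_queue_enqueue( qs, (void *) 1 );
+
+       if( lfds601_queue_dequeue(qs, &user_data) )
+         printf( "dequeued %p\n", user_data );
+
+       lfds601_queue_delete( qs, NULL, NULL );
+  */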
+
+
+
+
+
+ /***** lfds601_ringbuffer *****/
+
+ /***** enums *****/
+ enum lfds601_ringbuffer_query_type
+ {
+ LFDS601_RINGBUFFER_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds601_ringbuffer_state;
+
+ /***** public prototypes *****/
+ int lfds601_ringbuffer_new( struct lfds601_ringbuffer_state **rs, lfds601_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );
+ void lfds601_ringbuffer_delete( struct lfds601_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ struct lfds601_freelist_element *lfds601_ringbuffer_get_read_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element **fe );
+ struct lfds601_freelist_element *lfds601_ringbuffer_get_write_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element **fe, int *overwrite_flag );
+
+ void lfds601_ringbuffer_put_read_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element *fe );
+ void lfds601_ringbuffer_put_write_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element *fe );
+
+ void lfds601_ringbuffer_query( struct lfds601_ringbuffer_state *rs, enum lfds601_ringbuffer_query_type query_type, void *query_input, void *query_output );
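+
+  /* usage sketch : writing one entry and reading it back on a single thread; the
+     ring size of 16, the NULL init/delete callbacks and the value (void *) 1 are
+     illustrative only
+
+       struct lfds601_ringbuffer_state
+         *rs;
+
+       struct lfds601_freelist_element
+         *fe;
+
+       void
+         *user_data;
+
+       int
+         overwrite_flag;
+
+       lfds601_ringbuffer_new( &rs, 16, NULL, NULL );
+
+       lfds601_ringbuffer_get_write_element( rs, &fe, &overwrite_flag );
+       lfds601_freelist_set_user_data_in_element( fe, (void *) 1 );
+       lfds601_ringbuffer_put_write_element( rs, fe );
+
+       if( lfds601_ringbuffer_get_read_element(rs, &fe) != NULL )
+       {
+         lfds601_freelist_get_user_data_from_element( fe, &user_data );
+         lfds601_ringbuffer_put_read_element( rs, fe );
+       }
+
+       lfds601_ringbuffer_delete( rs, NULL, NULL );
+  */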
+
+
+
+
+
+ /***** lfds601_slist *****/
+
+ /***** incomplete types *****/
+ struct lfds601_slist_state;
+ struct lfds601_slist_element;
+
+ /***** public prototypes *****/
+ int lfds601_slist_new( struct lfds601_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+ void lfds601_slist_delete( struct lfds601_slist_state *ss );
+
+ struct lfds601_slist_element *lfds601_slist_new_head( struct lfds601_slist_state *ss, void *user_data );
+ struct lfds601_slist_element *lfds601_slist_new_next( struct lfds601_slist_element *se, void *user_data );
+
+ void lfds601_slist_delete_element( struct lfds601_slist_state *ss, struct lfds601_slist_element *se );
+ void lfds601_slist_delete_all_elements( struct lfds601_slist_state *ss );
+
+ int lfds601_slist_get_user_data_from_element( struct lfds601_slist_element *se, void **user_data );
+ int lfds601_slist_set_user_data_in_element( struct lfds601_slist_element *se, void *user_data );
+
+ struct lfds601_slist_element *lfds601_slist_get_head( struct lfds601_slist_state *ss, struct lfds601_slist_element **se );
+ struct lfds601_slist_element *lfds601_slist_get_next( struct lfds601_slist_element *se, struct lfds601_slist_element **next_se );
+ struct lfds601_slist_element *lfds601_slist_get_head_and_then_next( struct lfds601_slist_state *ss, struct lfds601_slist_element **se );
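+
+  /* usage sketch : creating a list, adding a head element and walking it on a
+     single thread; the NULL delete callback and the value (void *) 1 are
+     illustrative only
+
+       struct lfds601_slist_state
+         *ss;
+
+       struct lfds601_slist_element
+         *se;
+
+       void
+         *user_data;
+
+       lfds601_slist_new( &ss, NULL, NULL );
+
+       lfds601_slist_new_head( ss, (void *) 1 );
+
+       lfds601_slist_get_head( ss, &se );
+
+       while( se != NULL )
+       {
+         lfds601_slist_get_user_data_from_element( se, &user_data );
+         lfds601_slist_get_next( se, &se );
+       }
+
+       lfds601_slist_delete( ss );
+  */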
+
+
+
+
+
+ /***** lfds601_stack *****/
+
+ /***** enums *****/
+ enum lfds601_stack_query_type
+ {
+ LFDS601_STACK_QUERY_ELEMENT_COUNT
+ };
+
+ /***** incomplete types *****/
+ struct lfds601_stack_state;
+
+ /***** public prototypes *****/
+ int lfds601_stack_new( struct lfds601_stack_state **ss, lfds601_atom_t number_elements );
+ void lfds601_stack_delete( struct lfds601_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ void lfds601_stack_clear( struct lfds601_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state );
+
+ int lfds601_stack_push( struct lfds601_stack_state *ss, void *user_data );
+ int lfds601_stack_guaranteed_push( struct lfds601_stack_state *ss, void *user_data );
+ int lfds601_stack_pop( struct lfds601_stack_state *ss, void **user_data );
+
+ void lfds601_stack_query( struct lfds601_stack_state *ss, enum lfds601_stack_query_type query_type, void *query_input, void *query_output );
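+
+  /* usage sketch : a minimal, single-threaded push/pop pair; the element count of 16
+     and the value (void *) 1 are illustrative only
+
+       struct lfds601_stack_state
+         *ss;
+
+       void
+         *user_data;
+
+       lfds601_stack_new( &ss, 16 );
+
+       lfds601_stack_push( ss, (void *) 1 );
+
+       if( lfds601_stack_pop(ss, &user_data) )
+         printf( "popped %p\n", user_data );
+
+       lfds601_stack_delete( ss, NULL, NULL );
+  */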
+
+
+
+
+
+ #define __LIBLFDS601_H
+
+#endif
+
--- /dev/null
+EXPORTS\r
+\r
+lfds601_freelist_delete = lfds601_freelist_delete @1\r
+lfds601_freelist_get_user_data_from_element = lfds601_freelist_get_user_data_from_element @2\r
+lfds601_freelist_guaranteed_pop = lfds601_freelist_guaranteed_pop @3\r
+lfds601_freelist_new = lfds601_freelist_new @4\r
+lfds601_freelist_new_elements = lfds601_freelist_new_elements @5\r
+lfds601_freelist_pop = lfds601_freelist_pop @6\r
+lfds601_freelist_push = lfds601_freelist_push @7\r
+lfds601_freelist_query = lfds601_freelist_query @8\r
+lfds601_freelist_set_user_data_in_element = lfds601_freelist_set_user_data_in_element @9\r
+\r
+lfds601_queue_delete = lfds601_queue_delete @10\r
+lfds601_queue_dequeue = lfds601_queue_dequeue @11\r
+lfds601_queue_enqueue = lfds601_queue_enqueue @12\r
+lfds601_queue_guaranteed_enqueue = lfds601_queue_guaranteed_enqueue @13\r
+lfds601_queue_new = lfds601_queue_new @14\r
+lfds601_queue_query = lfds601_queue_query @15\r
+\r
+lfds601_ringbuffer_delete = lfds601_ringbuffer_delete @16\r
+lfds601_ringbuffer_get_read_element = lfds601_ringbuffer_get_read_element @17\r
+lfds601_ringbuffer_get_write_element = lfds601_ringbuffer_get_write_element @18\r
+lfds601_ringbuffer_new = lfds601_ringbuffer_new @19\r
+lfds601_ringbuffer_put_read_element = lfds601_ringbuffer_put_read_element @20\r
+lfds601_ringbuffer_put_write_element = lfds601_ringbuffer_put_write_element @21\r
+\r
+lfds601_slist_delete = lfds601_slist_delete @22\r
+lfds601_slist_delete_all_elements = lfds601_slist_delete_all_elements @23\r
+lfds601_slist_delete_element = lfds601_slist_delete_element @24\r
+lfds601_slist_get_head = lfds601_slist_get_head @25\r
+lfds601_slist_get_head_and_then_next = lfds601_slist_get_head_and_then_next @26\r
+lfds601_slist_get_next = lfds601_slist_get_next @27\r
+lfds601_slist_get_user_data_from_element = lfds601_slist_get_user_data_from_element @28\r
+lfds601_slist_new = lfds601_slist_new @29\r
+lfds601_slist_new_head = lfds601_slist_new_head @30\r
+lfds601_slist_new_next = lfds601_slist_new_next @31\r
+lfds601_slist_set_user_data_in_element = lfds601_slist_set_user_data_in_element @32\r
+\r
+lfds601_stack_clear = lfds601_stack_clear @33\r
+lfds601_stack_delete = lfds601_stack_delete @34\r
+lfds601_stack_guaranteed_push = lfds601_stack_guaranteed_push @35\r
+lfds601_stack_new = lfds601_stack_new @36\r
+lfds601_stack_pop = lfds601_stack_pop @37\r
+lfds601_stack_push = lfds601_stack_push @38\r
+lfds601_stack_query = lfds601_stack_query @39\r
+\r
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds601", "liblfds601.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="liblfds601"\r
+ ProjectGUID="{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+ RootNamespace="liblfds"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds601.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds601.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds601.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds601.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="inc"\r
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"\r
+ UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"\r
+ >\r
+ <File\r
+ RelativePath=".\inc\abstraction.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\freelist.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\liblfds601.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\queue.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\ringbuffer.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\inc\stack.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="src"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_internal.h"\r
+ >\r
+ </File>\r
+ <Filter\r
+ Name="lfds601_abstraction"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_abstraction\lfds601_abstraction_aligned_free.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_abstraction\lfds601_abstraction_aligned_malloc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_abstraction\lfds601_abstraction_cas.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_abstraction\lfds601_abstraction_dcas.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_abstraction\lfds601_abstraction_increment.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_abstraction\lfds601_abstraction_internal.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds601_freelist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_freelist\lfds601_freelist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_freelist\lfds601_freelist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_freelist\lfds601_freelist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_freelist\lfds601_freelist_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_freelist\lfds601_freelist_pop_push.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_freelist\lfds601_freelist_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds601_queue"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_queue\lfds601_queue_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_queue\lfds601_queue_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_queue\lfds601_queue_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_queue\lfds601_queue_query.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_queue\lfds601_queue_queue.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds601_ringbuffer"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_ringbuffer\lfds601_ringbuffer_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_ringbuffer\lfds601_ringbuffer_get_and_put.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_ringbuffer\lfds601_ringbuffer_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_ringbuffer\lfds601_ringbuffer_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_ringbuffer\lfds601_ringbuffer_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds601_slist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_slist\lfds601_slist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_slist\lfds601_slist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_slist\lfds601_slist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_slist\lfds601_slist_link.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_slist\lfds601_slist_new.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds601_stack"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds601_stack\lfds601_stack_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_stack\lfds601_stack_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_stack\lfds601_stack_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_stack\lfds601_stack_push_pop.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds601_stack\lfds601_stack_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = inc\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>/dev/null 2>/dev/null\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = liblfds601\r
+AR_BINARY = $(BINDIR)/$(BINNAME).a\r
+SO_BINARY = $(BINDIR)/$(BINNAME).so\r
+SRCDIRS = lfds601_abstraction lfds601_freelist lfds601_queue lfds601_ringbuffer lfds601_slist lfds601_stack\r
+# TRD : be aware - in the linux makefile, with the one-pass linking behaviour of the GNU linker, the order\r
+# of source files matters! this is because it leads to the ordering of objects in the library and\r
+# that in turn, since the data structures all use the freelist API and the abstraction API, has to be\r
+# correct \r
+SOURCES = lfds601_queue_delete.c lfds601_queue_new.c lfds601_queue_query.c lfds601_queue_queue.c \\r
+ lfds601_ringbuffer_delete.c lfds601_ringbuffer_get_and_put.c lfds601_ringbuffer_new.c lfds601_ringbuffer_query.c \\r
+ lfds601_slist_delete.c lfds601_slist_get_and_set.c lfds601_slist_link.c lfds601_slist_new.c \\r
+ lfds601_stack_delete.c lfds601_stack_new.c lfds601_stack_push_pop.c lfds601_stack_query.c \\r
+ lfds601_freelist_delete.c lfds601_freelist_get_and_set.c lfds601_freelist_new.c lfds601_freelist_query.c lfds601_freelist_pop_push.c \\r
+ lfds601_abstraction_aligned_free.c lfds601_abstraction_aligned_malloc.c lfds601_abstraction_cas.c lfds601_abstraction_dcas.c lfds601_abstraction_increment.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))\r
+DEPENDS = $(patsubst %.c,$(OBJDIR)/%.d,$(notdir $(SOURCES)))\r
+\r
+##### CPU variants #####\r
+GCCARCH = $(shell uname -m)\r
+\r
+ifeq ($(GCCARCH),x86_64)\r
+ GCCARCH = core2\r
+endif\r
+\r
+ifeq ($(findstring arm,$(GCCARCH)),arm)\r
+ GCCARCH = armv6k\r
+endif\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+DG = gcc\r
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)"\r
+\r
+CC = gcc\r
+CBASE = -Wall -Wno-unknown-pragmas -std=c99 -march=$(GCCARCH) -c -I"$(SRCDIR)" -I"$(INCDIR)"\r
+CFREL = -O2 -finline-functions -Wno-strict-aliasing\r
+CFDBG = -O0 -g\r
+\r
+AR = ar\r
+AFLAGS = -rcs\r
+\r
+LD = gcc\r
+LFBASE = -Wall -std=c99 -shared\r
+LFREL = -O2 -s\r
+LFDBG = -O0 -g\r
+\r
+##### rel/dbg .a/.so variants #####\r
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)\r
+ CBASE := $(CBASE) -fpic\r
+endif\r
+\r
+CFLAGS = $(CBASE) $(CFDBG)\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+\r
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)\r
+ CFLAGS = $(CBASE) $(CFREL)\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.o : %.c\r
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d\r
+ $(CC) $(CFLAGS) -o $@ $<\r
+\r
+##### explicit rules #####\r
+$(AR_BINARY) : $(OBJECTS)\r
+ $(AR) $(AFLAGS) $(AR_BINARY) $(OBJECTS)\r
+\r
+$(SO_BINARY) : $(OBJECTS)\r
+ $(LD) $(LFLAGS) $(SYSLIBS) $(OBJECTS) -o $(SO_BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean arrel ardbg sorel sodbg\r
+\r
+clean : \r
+ @rm -f $(BINDIR)/$(BINNAME).* $(OBJDIR)/*.o $(OBJDIR)/*.d\r
+\r
+arrel : $(AR_BINARY)\r
+ardbg : $(AR_BINARY)\r
+\r
+sorel : $(SO_BINARY)\r
+sodbg : $(SO_BINARY)\r
+\r
+##### dependencies #####\r
+-include $(DEPENDS)\r
+\r
+##### notes #####\r
+# TRD : we use -std=c99 purely to permit C++ style comments\r
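+#\r
+# illustrative invocations (an assumption, not part of the original notes; it assumes\r
+# this file is invoked as makefile.linux and GNUmake is used) :\r
+#\r
+#   make -f makefile.linux arrel     (release static library, bin/liblfds601.a)\r
+#   make -f makefile.linux clean\r
+#   make -f makefile.linux sodbg     (debug shared object, bin/liblfds601.so)\r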
+\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = inc\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>nul 2>nul\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = liblfds601\r
+LIB_BINARY = $(BINDIR)\$(BINNAME).lib\r
+DLL_BINARY = $(BINDIR)\$(BINNAME).dll\r
+SRCDIRS = lfds601_abstraction lfds601_freelist lfds601_queue lfds601_ringbuffer lfds601_slist lfds601_stack\r
+SOURCES = lfds601_abstraction_aligned_free.c lfds601_abstraction_aligned_malloc.c lfds601_abstraction_cas.c lfds601_abstraction_dcas.c lfds601_abstraction_increment.c \\r
+ lfds601_freelist_delete.c lfds601_freelist_get_and_set.c lfds601_freelist_new.c lfds601_freelist_query.c lfds601_freelist_pop_push.c \\r
+ lfds601_queue_delete.c lfds601_queue_new.c lfds601_queue_query.c lfds601_queue_queue.c \\r
+ lfds601_ringbuffer_delete.c lfds601_ringbuffer_get_and_put.c lfds601_ringbuffer_new.c lfds601_ringbuffer_query.c \\r
+ lfds601_slist_delete.c lfds601_slist_get_and_set.c lfds601_slist_link.c lfds601_slist_new.c \\r
+ lfds601_stack_delete.c lfds601_stack_new.c lfds601_stack_push_pop.c lfds601_stack_query.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+CC = cl\r
+CBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /DUNICODE /D_UNICODE /DWIN32_LEAN_AND_MEAN\r
+CFREL = /Ox /DNDEBUG\r
+CFDBG = /Od /Gm /Zi /D_DEBUG\r
+\r
+AR = lib\r
+AFLAGS = /nologo /subsystem:console /wx /verbose\r
+\r
+LD = link\r
+LFBASE = /dll /def:$(BINNAME).def /nologo /subsystem:console /wx /nodefaultlib /nxcompat\r
+LFREL = /incremental:no\r
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"\r
+\r
+##### variants #####\r
+CFLAGS = $(CBASE) $(CFDBG) /MTd\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+CLIB = libcmtd.lib\r
+\r
+ifeq ($(MAKECMDGOALS),librel)\r
+ CFLAGS = $(CBASE) $(CFREL) /MT\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+ CLIB = libcmt.lib\r
+endif\r
+\r
+ifeq ($(MAKECMDGOALS),libdbg)\r
+ CFLAGS = $(CBASE) $(CFDBG) /MTd\r
+ LFLAGS = $(LFBASE) $(LFDBG)\r
+ CLIB = libcmtd.lib\r
+endif\r
+\r
+ifeq ($(MAKECMDGOALS),dllrel)\r
+ CFLAGS = $(CBASE) $(CFREL) /MD\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+ CLIB = msvcrt.lib\r
+endif\r
+\r
+ifeq ($(MAKECMDGOALS),dlldbg)\r
+ CFLAGS = $(CBASE) $(CFDBG) /MDd\r
+ LFLAGS = $(LFBASE) $(LFDBG)\r
+ CLIB = msvcrtd.lib\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.obj : %.c\r
+ $(CC) $(CFLAGS) "/Fo$@" $<\r
+\r
+##### explicit rules #####\r
+$(LIB_BINARY) : $(OBJECTS)\r
+ $(AR) $(AFLAGS) $(OBJECTS) /out:$(LIB_BINARY)\r
+\r
+$(DLL_BINARY) : $(OBJECTS)\r
+ $(LD) $(LFLAGS) $(CLIB) $(OBJECTS) /out:$(DLL_BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean librel libdbg dllrel dlldbg\r
+\r
+clean : \r
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)\r
+\r
+librel : $(LIB_BINARY)\r
+libdbg : $(LIB_BINARY)\r
+\r
+dllrel : $(DLL_BINARY)\r
+dlldbg : $(DLL_BINARY)\r
+\r
--- /dev/null
+introduction\r
+============\r
+Welcome to liblfds, a portable, license-free, lock-free data structure\r
+library written in C.\r
+\r
+platforms\r
+=========\r
+Currently liblfds out-of-the-box supports;\r
+\r
+Operating System   CPU          Toolset\r
+================   ==========   =======\r
+Windows 64-bit     IA64 & x64   1. Microsoft Visual Studio 2008\r
+                                2. Microsoft Windows SDK and GNUmake >= 3.81\r
+\r
+Windows 32-bit     x64 & x86    1. Microsoft Visual Studio 2008\r
+                                2. Visual C++ 2008 Express Edition\r
+                                3. Microsoft Windows SDK and GNUmake >= 3.81\r
+\r
+Windows Kernel     IA64, x64,   1. Windows Driver Kit >= 7.0.0\r
+                   x86\r
+\r
+Linux 64-bit       x64          1. GCC >= 4.1.0 and GNUmake >= 3.81\r
+\r
+Linux 32-bit       x64, x86,    1. GCC >= 4.1.0 and GNUmake >= 3.81\r
+                   ARM\r
+\r
+data structures\r
+===============\r
+Currently liblfds provides the following;\r
+\r
+* Freelist\r
+* Queue\r
+* Ringbuffer\r
+* Singly linked list (logical delete only)\r
+* Stack\r
+\r
+liblfds on-line\r
+===============\r
+On the liblfds home page, you will find the blog, a bugzilla, a forum, a\r
+wiki and the current and all historical source releases.\r
+\r
+The wiki contains comprehensive documentation for development,\r
+building, testing and porting.\r
+\r
+http://www.liblfds.org\r
+\r
+license\r
+=======\r
+There is no license. You are free to use this code in any way.\r
+\r
+building\r
+========\r
+On Windows, depending on your target platform, one of the following toolchains\r
+is required;\r
+\r
+ * Microsoft Visual Studio 2008 (expensive)\r
+ * Visual C++ 2008 Express Edition (free, but no 64 bit support)\r
+ * Microsoft Windows SDK (free, no GUI, has 64 bit support) and GNUmake 3.81 \r
+\r
+On Windows (kernel-mode), the following toolchain is required; \r
+\r
+ * Windows Driver Kit 7.0.0 or later\r
+\r
+On Linux, the following toolchain is required;\r
+\r
+ * gcc 4.1.0 or later and GNUmake 3.81 \r
+\r
+For documentation, see the building guide on the wiki.\r
+\r
+using\r
+=====\r
+Once built, there is a single header file, /inc/liblfds601.h, which you must\r
+include in your source code, and a single library file, /bin/liblfds601.*, where\r
+the suffix depends on your platform and your build choice (static or dynamic).\r
+If statically built, link against the library directly; if dynamically built,\r
+arrange your system so that the library can be found by the loader at run-time.\r
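+\r
+As a purely illustrative sketch (not taken from the library documentation; it\r
+simply uses the freelist API as defined in the sources in this release, and\r
+assumes the header and library are on your include and linker paths), a program\r
+using liblfds might look like this;\r
+\r
+  #include <stdio.h>\r
+  #include "liblfds601.h"\r
+\r
+  int main( void )\r
+  {\r
+    struct lfds601_freelist_state *fs;\r
+    struct lfds601_freelist_element *fe;\r
+    void *user_data;\r
+\r
+    // create a freelist of 10 elements, with no user data init function\r
+    if( 0 == lfds601_freelist_new(&fs, 10, NULL, NULL) )\r
+      return( 1 );\r
+\r
+    // pop an element, store some user data in it, push it back\r
+    if( NULL != lfds601_freelist_pop(fs, &fe) )\r
+    {\r
+      lfds601_freelist_set_user_data_in_element( fe, (void *) "hello" );\r
+      lfds601_freelist_push( fs, fe );\r
+    }\r
+\r
+    // pop once more; this example is single-threaded, so we get the same element back\r
+    if( NULL != lfds601_freelist_pop(fs, &fe) )\r
+    {\r
+      lfds601_freelist_get_user_data_from_element( fe, &user_data );\r
+      printf( "user data = %s\n", (char *) user_data );\r
+      lfds601_freelist_push( fs, fe );\r
+    }\r
+\r
+    lfds601_freelist_delete( fs, NULL, NULL );\r
+\r
+    return( 0 );\r
+  }\r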
+\r
+testing\r
+=======\r
+The library comes with a command line test and benchmark program. This\r
+program requires threads. As such, it is only suitable for platforms providing\r
+thread support and which can execute a command line binary. Currently this\r
+means the test and benchmark program works for all platforms except the Windows\r
+Kernel.\r
+\r
+For documentation, see the testing and benchmarking guide on the wiki.\r
+\r
+porting\r
+=======\r
+Both the test program and liblfds provide an abstraction layer which acts to\r
+mask platform differences. Porting is the act of implementing on your platform\r
+the functions which make up the abstraction layers. You do not need to port\r
+the test program to port liblfds, but obviously it is recommended, so you can\r
+test your port.\r
+\r
+To support liblfds, your platform must provide either contiguous double-word\r
+compare-and-swap (e.g. x86/x64), or contiguous double-word load-link/conditional-store\r
+where normal loads cannot occur inside the LL/CS pair (e.g. ARM), or single word\r
+load-link/conditional-store where normal loads can occur inside the LL/CS pair.\r
+\r
+For documentation, see the porting guide on the wiki.\r
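+\r
+As an informal illustration (the prototypes below are simply those of the\r
+functions defined in /src/lfds601_abstraction/ in this release; the porting\r
+guide remains the authoritative reference), a port supplies implementations\r
+of the abstraction layer functions;\r
+\r
+  void *lfds601_abstraction_aligned_malloc( size_t size, size_t align_in_bytes );\r
+  void lfds601_abstraction_aligned_free( void *memory );\r
+  lfds601_atom_t lfds601_abstraction_cas( volatile lfds601_atom_t *destination,\r
+                                          lfds601_atom_t exchange, lfds601_atom_t compare );\r
+  unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination,\r
+                                          lfds601_atom_t *exchange, lfds601_atom_t *compare );\r
+  lfds601_atom_t lfds601_abstraction_increment( lfds601_atom_t *value );\r
+\r
+(In the sources, cas, dcas and increment are declared LFDS601_INLINE.)\r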
+\r
+release history\r
+===============\r
+release 1, 25th September 2009, svn revision 1574.\r
+ - initial release\r
+\r
+release 2, 5th October 2009, svn revision 1599.\r
+ - added abstraction layer for Windows kernel\r
+ - minor code tidyups/fixes\r
+\r
+release 3, 25th October 2009, svn revision 1652.\r
+ - added singly linked list (logical delete only)\r
+ - minor code tidyups/fixes\r
+\r
+release 4, 7th December 2009, svn revision 1716.\r
+ - added ARM support\r
+ - added benchmarking functionality to the test program\r
+ - fixed a profound and pervasive pointer\r
+ declaration bug; earlier releases of liblfds\r
+ *should not be used*\r
+\r
+release 5, 19th December 2009, svn revision 1738.\r
+ - fixed subtle queue bug, which also affected ringbuffer\r
+ and caused data re-ordering under high load\r
+ - added benchmarks for freelist, ringbuffer and stack\r
+\r
+release 6, 29th December 2009, svn revision 1746.\r
+ - fixed two implementation errors, which reduced performance,\r
+ spotted by Codeplug from "http://cboard.cprogramming.com".\r
+\r
+release 6.0.0, 18th December 2012, svn revision 2537\r
+ - introduction of namespaces, e.g. the "lfds601_" prefix\r
+ code otherwise COMPLETE AND WHOLLY UNCHANGED\r
+ this release is a stepping-stone to 6.1.0\r
+\r
+release 6.0.1, 2nd January 2013, svn revision 3296\r
+ - bug fix where an enum wasn't moved into the new namespacing policy\r
+\r
--- /dev/null
+The Windows kernel build environment is primitive and has a number\r
+of severe limitations; in particular, all source files must be in\r
+one directory and it is not possible to choose the output binary type\r
+(static or dynamic library) from the build command line; rather,\r
+a string has to be modified in a text file used by the build (!)\r
+\r
+To deal with these limitations, it is necessary for a Windows kernel\r
+build to run a batch file prior to building.\r
+\r
+There are two batch files, one for static library builds and the other\r
+for dynamic library builds.\r
+\r
+They are both idempotent; you can run them as often as you like and\r
+switch between them as often as you want. It's all fine; whenever\r
+you run one of them, it will take you from whatever state you were\r
+previously in, into the state you want to be in.\r
+\r
+Both batch files copy all the source files into a single directory,\r
+"/src/single_dir_for_windows_kernel/".\r
+\r
+The static library batch file will then copy "/sources.static" into\r
+"/src/single_dir_for_windows_kernel/", which will cause a static\r
+library to be built.\r
+\r
+The dynamic library batch file will then copy "/sources.dynamic" into\r
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic\r
+library to be built. It will also copy "src/driver_entry.c" into\r
+"/src/single_dir_for_windows_kernel/", since the linker requires\r
+the DriverEntry function to exist for dynamic libraries, even\r
+though it's not used.\r
+\r
+\r
--- /dev/null
+@echo off\r
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul\r
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y sources.dynamic src\single_dir_for_windows_kernel\sources 1>nul 2>nul\r
+copy /y src\driver_entry.c src\single_dir_for_windows_kernel 1>nul 2>nul\r
+echo Windows kernel dynamic library build directory structure created.\r
+echo (Note the effects of this batch file are idempotent).\r
+\r
--- /dev/null
+@echo off\r
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul\r
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds601_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y sources.static src\single_dir_for_windows_kernel\sources 1>nul 2>nul\r
+erase /f src\single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul\r
+echo Windows kernel static library build directory structure created.\r
+echo (Note the effects of this batch file are idempotent).\r
+\r
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4\r
+DLLDEF = ../../liblfds601.def\r
+TARGETNAME = liblfds601\r
+TARGETPATH = ../../bin/\r
+TARGETTYPE = EXPORT_DRIVER\r
+UMTYPE = nt\r
+USER_C_FLAGS = /DWIN_KERNEL_BUILD\r
+\r
+INCLUDES = ..;../../inc/\r
+SOURCES = lfds601_abstraction_aligned_free.c \\r
+ lfds601_abstraction_aligned_malloc.c \\r
+ lfds601_abstraction_cas.c \\r
+ lfds601_abstraction_dcas.c \\r
+ lfds601_abstraction_increment.c \\r
+ lfds601_freelist_delete.c \\r
+ lfds601_freelist_get_and_set.c \\r
+ lfds601_freelist_new.c \\r
+ lfds601_freelist_pop_push.c \\r
+ lfds601_freelist_query.c \\r
+ lfds601_queue_delete.c \\r
+ lfds601_queue_new.c \\r
+ lfds601_queue_query.c \\r
+ lfds601_queue_queue.c \\r
+ lfds601_ringbuffer_delete.c \\r
+ lfds601_ringbuffer_get_and_put.c \\r
+ lfds601_ringbuffer_new.c \\r
+ lfds601_ringbuffer_query.c \\r
+ lfds601_slist_delete.c \\r
+ lfds601_slist_get_and_set.c \\r
+ lfds601_slist_link.c \\r
+ lfds601_slist_new.c \\r
+ lfds601_stack_delete.c \\r
+ lfds601_stack_new.c \\r
+ lfds601_stack_push_pop.c \\r
+ lfds601_stack_query.c \\r
+ driver_entry.c\r
+\r
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4\r
+TARGETNAME = liblfds601\r
+TARGETPATH = ../../bin/\r
+TARGETTYPE = DRIVER_LIBRARY\r
+UMTYPE = nt\r
+USER_C_FLAGS = /DWIN_KERNEL_BUILD\r
+\r
+INCLUDES = ..;../../inc/\r
+SOURCES = lfds601_abstraction_aligned_free.c \\r
+ lfds601_abstraction_aligned_malloc.c \\r
+ lfds601_abstraction_cas.c \\r
+ lfds601_abstraction_dcas.c \\r
+ lfds601_abstraction_increment.c \\r
+ lfds601_freelist_delete.c \\r
+ lfds601_freelist_get_and_set.c \\r
+ lfds601_freelist_new.c \\r
+ lfds601_freelist_pop_push.c \\r
+ lfds601_freelist_query.c \\r
+ lfds601_queue_delete.c \\r
+ lfds601_queue_new.c \\r
+ lfds601_queue_query.c \\r
+ lfds601_queue_queue.c \\r
+ lfds601_ringbuffer_delete.c \\r
+ lfds601_ringbuffer_get_and_put.c \\r
+ lfds601_ringbuffer_new.c \\r
+ lfds601_ringbuffer_query.c \\r
+ lfds601_slist_delete.c \\r
+ lfds601_slist_get_and_set.c \\r
+ lfds601_slist_link.c \\r
+ lfds601_slist_new.c \\r
+ lfds601_stack_delete.c \\r
+ lfds601_stack_new.c \\r
+ lfds601_stack_push_pop.c \\r
+ lfds601_stack_query.c\r
+\r
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "lfds601_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )\r
+{\r
+ return( STATUS_SUCCESS );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+This C file (driver_entry.c) is used when building a dynamic library for\r
+the Windows kernel. It exists to work around one of the limitations of\r
+that build environment. It is not used by any other build; just ignore it.\r
+\r
--- /dev/null
+#include "lfds601_abstraction_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ void lfds601_abstraction_aligned_free( void *memory )\r
+ {\r
+ _aligned_free( memory );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (_XOPEN_SOURCE >= 600)\r
+\r
+ /* TRD : any OS on any CPU with any compiler with POSIX 6.00 or better\r
+\r
+ _XOPEN_SOURCE is actually set by the user, not by the compiler\r
+ it is the way the user signals to the compiler what\r
+ level of POSIX should be available\r
+ (it assumes of course the compiler has support for the given level of POSIX requested)\r
+ */\r
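+\r
+ /* illustrative note (an assumption, not taken from the supplied makefiles) : one common\r
+ way to satisfy this condition is to define _XOPEN_SOURCE on the compiler command line,\r
+ for example;\r
+\r
+ gcc -std=c99 -D_XOPEN_SOURCE=600 -Isrc -Iinc -c src/lfds601_abstraction/lfds601_abstraction_aligned_free.c\r
+ */\r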
+\r
+ void lfds601_abstraction_aligned_free( void *memory )\r
+ {\r
+ free( memory );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ void lfds601_abstraction_aligned_free( void *memory )\r
+ {\r
+ ExFreePoolWithTag( memory, 'sdfl' );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "lfds601_abstraction_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ void *lfds601_abstraction_aligned_malloc( size_t size, size_t align_in_bytes )\r
+ {\r
+ void\r
+ *rv;\r
+\r
+ rv = _aligned_malloc( size, align_in_bytes );\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (_XOPEN_SOURCE >= 600)\r
+\r
+ /* TRD : any OS on any CPU with any compiler with POSIX 6.00 or better\r
+\r
+ _XOPEN_SOURCE is actually set by the user, not by the compiler\r
+ it is the way the user signals to the compiler what\r
+ level of POSIX should be available\r
+ (it assumes of course the compiler has support for the given level of POSIX requested)\r
+ */\r
+\r
+ void *lfds601_abstraction_aligned_malloc( size_t size, size_t align_in_bytes )\r
+ {\r
+ int\r
+ rv;\r
+\r
+ void\r
+ *memory;\r
+\r
+ rv = posix_memalign( &memory, align_in_bytes, size );\r
+\r
+ // TRD : posix_memalign returns 0 on success, docs do not say *memory == NULL on fail\r
+ if( rv != 0 )\r
+ memory = NULL;\r
+\r
+ return( memory );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ void *lfds601_abstraction_aligned_malloc( size_t size, size_t align_in_bytes )\r
+ {\r
+ void\r
+ *rv;\r
+\r
+ /* TRD : ExAllocatePoolWithTag() allocates memory aligned on 8 bytes on 32-bit CPUs\r
+ and on 16 bytes on 64-bit CPUs, which is what we want\r
+\r
+ as such, align_in_bytes is not needed; we must refer to it to avoid the\r
+ compiler warning\r
+ */\r
+\r
+ align_in_bytes;\r
+\r
+ rv = ExAllocatePoolWithTag( NonPagedPool, size, 'sdfl' );\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "lfds601_abstraction_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER)\r
+\r
+ /* TRD : 64 bit and 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_cas( volatile lfds601_atom_t *destination, lfds601_atom_t exchange, lfds601_atom_t compare )\r
+ {\r
+ assert( destination != NULL );\r
+ // TRD : exchange can be any value in its range\r
+ // TRD : compare can be any value in its range\r
+\r
+ return( (lfds601_atom_t) _InterlockedCompareExchangePointer((void * volatile *) destination, (void *) exchange, (void *) compare) );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (!defined __arm__ && __GNUC__ >= 4 && __GNUC_MINOR__ >= 1 && __GNUC_PATCHLEVEL__ >= 0)\r
+\r
+ /* TRD : any OS on any CPU except ARM with GCC 4.1.0 or better\r
+\r
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics\r
+\r
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_cas( volatile lfds601_atom_t *destination, lfds601_atom_t exchange, lfds601_atom_t compare )\r
+ {\r
+ assert( destination != NULL );\r
+ // TRD : exchange can be any value in its range\r
+ // TRD : compare can be any value in its range\r
+\r
+ // TRD : note the different argument order for the GCC intrinsic to the MSVC intrinsic\r
+\r
+ return( (lfds601_atom_t) __sync_val_compare_and_swap(destination, compare, exchange) );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __arm__ && __GNUC__)\r
+\r
+ /* TRD : any OS on any ARM with GCC\r
+\r
+ Remember however we need to return the original value of destination.\r
+\r
+ __arm__ indicates ARM\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_cas( volatile lfds601_atom_t *destination, lfds601_atom_t exchange, lfds601_atom_t compare )\r
+ {\r
+ lfds601_atom_t\r
+ stored_flag,\r
+ original_destination;\r
+\r
+ assert( destination != NULL );\r
+ // TRD : exchange can be any value in its range\r
+ // TRD : compare can be any value in its range\r
+\r
+ /* TRD : this is a standard, plain CAS, vulnerable to ABA */\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ " mov %[stored_flag], #1;" // put 1 into stored_flag\r
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)\r
+ "atomic_cas:;"\r
+ " ldrex %[original_destination], [%[destination]];" // load *destination into original_destination\r
+ " teq %[original_destination], %[compare];" // compare original_destination with compare\r
+ " bne exit;" // if not equal, exit\r
+ " strex %[stored_flag], %[exchange], [%[destination]];" // if equal, try to store exchange into *destination (on success, strex puts 0 into stored_flag)\r
+ " teq %[stored_flag], #0;" // check if stored_flag is 0\r
+ " bne atomic_cas;" // if not 0, retry (someone else touched *destination after we loaded but before we stored)\r
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)\r
+ "exit:;"\r
+\r
+ // output\r
+ : "+m" (*destination), [original_destination] "=&r" (original_destination), [stored_flag] "=&r" (stored_flag)\r
+\r
+ // input\r
+ : [destination] "r" (destination), [compare] "r" (compare), [exchange] "r" (exchange), [zero] "r" (0)\r
+\r
+ // clobbered\r
+ : "cc", "memory" // memory is clobbered because we issue a memory barrier\r
+ );\r
+\r
+ return( original_destination );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "lfds601_abstraction_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN64 && defined _MSC_VER)\r
+\r
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN64 indicates 64 bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ unsigned char\r
+ cas_result;\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ cas_result = _InterlockedCompareExchange128( (volatile __int64 *) destination, (__int64) *(exchange+1), (__int64) *exchange, (__int64 *) compare );\r
+\r
+ return( cas_result );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)\r
+\r
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler\r
+\r
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ __int64\r
+ original_compare;\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ *(__int64 *) &original_compare = *(__int64 *) compare;\r
+\r
+ *(__int64 *) compare = _InterlockedCompareExchange64( (volatile __int64 *) destination, *(__int64 *) exchange, *(__int64 *) compare );\r
+\r
+ return( (unsigned char) (*(__int64 *) compare == *(__int64 *) &original_compare) );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __x86_64__ && __GNUC__ && !defined __pic__)\r
+\r
+ /* TRD : any OS on x64 with GCC for statically linked code\r
+\r
+ __x86_64__ indicates x64\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ unsigned char\r
+ cas_result;\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ "lock;" // make cmpxchg16b atomic\r
+ "cmpxchg16b %0;" // cmpxchg16b sets ZF on success\r
+ "setz %3;" // if ZF set, set cas_result to 1\r
+\r
+ // output\r
+ : "+m" (*(volatile lfds601_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)\r
+\r
+ // input\r
+ : "b" (*exchange), "c" (*(exchange+1))\r
+\r
+ // clobbered\r
+ : "cc", "memory"\r
+ );\r
+\r
+ return( cas_result );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __i686__ && __GNUC__ && !defined __pic__)\r
+\r
+ /* TRD : any OS on x86 with GCC for statically linked code\r
+\r
+ __i686__ indicates x86\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ unsigned char\r
+ cas_result;\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ "lock;" // make cmpxchg8b atomic\r
+ "cmpxchg8b %0;" // cmpxchg8b sets ZF on success\r
+ "setz %3;" // if ZF set, set cas_result to 1\r
+\r
+ // output\r
+ : "+m" (*(volatile lfds601_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)\r
+\r
+ // input\r
+ : "b" (*exchange), "c" (*(exchange+1))\r
+\r
+ // clobbered\r
+ : "cc", "memory"\r
+ );\r
+\r
+ return( cas_result );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __x86_64__ && __GNUC__ && defined __pic__)\r
+\r
+ /* TRD : any OS on x64 with GCC for position independent code (e.g. a shared object)\r
+\r
+ __x86_64__ indicates x64\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ unsigned char\r
+ cas_result;\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ /* TRD : with a shared object, we cannot clobber RBX\r
+ as such, we borrow RSI - we load half of the exchange value into it\r
+ then swap it with RBX\r
+ then do the compare-and-swap\r
+ then swap the original value of RBX back from RSI\r
+ */\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ "xchg %%rsi, %%rbx;" // swap RBI and RBX \r
+ "lock;" // make cmpxchg16b atomic\r
+ "cmpxchg16b %0;" // cmpxchg16b sets ZF on success\r
+ "setz %3;" // if ZF set, set cas_result to 1\r
+ "xchg %%rbx, %%rsi;" // re-swap RBI and RBX\r
+\r
+ // output\r
+ : "+m" (*(volatile lfds601_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)\r
+\r
+ // input\r
+ : "S" (*exchange), "c" (*(exchange+1))\r
+\r
+ // clobbered\r
+ : "cc", "memory"\r
+ );\r
+\r
+ return( cas_result );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __i686__ && __GNUC__ && defined __pic__)\r
+\r
+ /* TRD : any OS on x86 with GCC for position independent code (e.g. a shared object)\r
+\r
+ __i686__ indicates x86\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ unsigned char\r
+ cas_result;\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ /* TRD : with a shared object, we cannot clobber EBX\r
+ as such, we borrow ESI - we load half of the exchange value into it\r
+ then swap it with EBX\r
+ then do the compare-and-swap\r
+ then swap the original value of EBX back from ESI\r
+ */\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ "xchg %%esi, %%ebx;" // swap EBI and EBX\r
+ "lock;" // make cmpxchg8b atomic\r
+ "cmpxchg8b %0;" // cmpxchg8b sets ZF on success\r
+ "setz %3;" // if ZF set, set cas_result to 1\r
+ "xchg %%ebx, %%esi;" // re-swap EBI and EBX\r
+\r
+ // output\r
+ : "+m" (*(volatile lfds601_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)\r
+\r
+ // input\r
+ : "S" (*exchange), "c" (*(exchange+1))\r
+\r
+ // clobbered\r
+ : "cc", "memory"\r
+ );\r
+\r
+ return( cas_result );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __arm__ && __GNUC__)\r
+\r
+ /* TRD : any OS on any ARM with GCC\r
+\r
+ Remember however we need to set into compare the original value of destination.\r
+\r
+ __arm__ indicates ARM\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE unsigned char lfds601_abstraction_dcas( volatile lfds601_atom_t *destination, lfds601_atom_t *exchange, lfds601_atom_t *compare )\r
+ {\r
+ lfds601_atom_t\r
+ *local_compare = compare,\r
+ stored_flag = 1;\r
+\r
+ register lfds601_atom_t\r
+ local_exchange_a __asm("r2"),\r
+ local_exchange_b __asm("r3"),\r
+ local_compare_a __asm("r4"),\r
+ local_compare_b __asm("r5"),\r
+ original_destination_a __asm("r6"),\r
+ original_destination_b __asm("r7");\r
+\r
+ assert( destination != NULL );\r
+ assert( exchange != NULL );\r
+ assert( compare != NULL );\r
+\r
+ /* TRD : some notes\r
+\r
+ the double word ldr and str instructions require contiguous registers\r
+ where the first register is an even number\r
+\r
+ honouring this requirement requires us to specifically specify\r
+ the registers to use (which is why we're using register __asm("rN")\r
+ in the declarations above)\r
+\r
+ the arguments to the function occupy registers r0, r1 and r2\r
+\r
+ we can use up to and including r8, but r9 can have a frame pointer in it\r
+\r
+ so we make a copy of compare (freeing up r2, so we can use it for a double\r
+ word load) but use destination (r0) and exchange (r1) directly\r
+\r
+ note LDRD and STRD became available in armv6k\r
+\r
+ apologies for the trickery with the mcr register variable - the code runs\r
+ out of registers on armv6k\r
+ */\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ " mov %[stored_flag], #1;" // put 1 into stored_flag\r
+ " mov %[local_exchange_a], #0;" // borrow local_exchange_a for mcr, to save a register\r
+ " mcr p15, 0, %[local_exchange_a], c7, c10, 5;" // memory barrier (ARM v6 compatible)\r
+ " ldrd %[local_exchange_a], %[local_exchange_b], [%[exchange]];" // load exchange into local_exchange_a and local_exchange_b (which are r2 and r3, respectively)\r
+ " ldrd %[local_compare_a], %[local_compare_b], [%[local_compare]];" // load compare into local_compare_a and local_compare_b (which are r4 and r5, respectively)\r
+ "atomic_dcas:;"\r
+ " ldrexd %[original_destination_a], %[original_destination_b], [%[destination]];" // load destination into original_destination_a and original_destination_b (which are r6 and r7, respectively)\r
+ " teq %[original_destination_a], %[local_compare_a];" // compare the first word of destination with the first word of compare\r
+ " teqeq %[original_destination_b], %[local_compare_b];" // if they're equal, compare the second word of destination with the second word of compare\r
+ " bne exit;" // if either word of destination does not match its respective word of compare, exit\r
+ " strexd %[stored_flag], %[local_exchange_a], %[local_exchange_b], [%[destination]];" // if both words were equal, try to store local_exchange_a and local_exchange_b into *destination (on success, strexed puts 0 into stored_flag)\r
+ " teq %[stored_flag], #0;" // check if stored_flag is 0\r
+ " bne atomic_dcas;" // if not 0, retry (someone else touched *destination after we loaded but before we stored)\r
+ "exit:;"\r
+ " strd %[original_destination_a], %[original_destination_b], [%[local_compare]];" // whether or not the CAS swapped, we always write the original value of destination into *compare\r
+ " mov %[local_exchange_a], #0;" // borrow local_exchange_a for mcr, to save a register\r
+ " mcr p15, 0, %[local_exchange_a], c7, c10, 5;" // memory barrier (ARM v6 compatible)\r
+\r
+ // output\r
+ : "+m" (*(volatile lfds601_atom_t (*)[2]) destination), "+m" (*(lfds601_atom_t (*)[2]) local_compare),\r
+ [stored_flag] "+&r" (stored_flag),\r
+ [original_destination_a] "+&r" (original_destination_a), [original_destination_b] "+&r" (original_destination_b),\r
+ [local_compare_a] "+&r" (local_compare_a), [local_compare_b] "+&r" (local_compare_b),\r
+ [local_exchange_a] "+&r" (local_exchange_a), [local_exchange_b] "+&r" (local_exchange_b)\r
+\r
+ // input\r
+ : "m" (*(lfds601_atom_t (*)[2]) exchange),\r
+ [destination] "r" (destination),\r
+ [local_compare] "r" (local_compare),\r
+ [exchange] "r" (exchange)\r
+\r
+ // clobbered\r
+ : "cc", "memory" // memory is clobbered because we issue a memory barrier\r
+ );\r
+\r
+ /* TRD : stored_flag is set to 0 on store, 1 on fail\r
+ we need to return 1 on success, 0 on fail\r
+ */\r
+\r
+ return( (unsigned char) !stored_flag );\r
+ }\r
+\r
+#endif\r
+\r
+\r
--- /dev/null
+#include "lfds601_abstraction_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN64 && defined _MSC_VER)\r
+\r
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN64 indicates 64 bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_increment( lfds601_atom_t *value )\r
+ {\r
+ __int64\r
+ rv;\r
+\r
+ assert( value != NULL );\r
+\r
+ rv = _InterlockedIncrement64( (__int64 *) value );\r
+\r
+ return( (lfds601_atom_t) rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)\r
+\r
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler\r
+\r
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_increment( lfds601_atom_t *value )\r
+ {\r
+ long int\r
+ rv;\r
+\r
+ assert( value != NULL );\r
+\r
+ rv = _InterlockedIncrement( (long int *) value );\r
+\r
+ return( (lfds601_atom_t) rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (!defined __arm__ && __GNUC__ >= 4 && __GNUC_MINOR__ >= 1 && __GNUC_PATCHLEVEL__ >= 0)\r
+\r
+ /* TRD : any OS on any CPU except ARM with GCC 4.1.0 or better\r
+\r
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics\r
+\r
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_increment( lfds601_atom_t *value )\r
+ {\r
+ lfds601_atom_t\r
+ rv;\r
+\r
+ assert( value != NULL );\r
+\r
+ // TRD : no need for casting here, GCC has a __sync_add_and_fetch() for all native types\r
+\r
+ rv = __sync_add_and_fetch( value, 1 );\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __arm__ && __GNUC__ >= 4)\r
+\r
+ /* TRD : any OS on any ARM with GCC 4 or better\r
+\r
+ __arm__ indicates ARM\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ LFDS601_INLINE lfds601_atom_t lfds601_abstraction_increment( lfds601_atom_t *value )\r
+ {\r
+ lfds601_atom_t\r
+ stored_flag = 0,\r
+ new_value = 0;\r
+\r
+ assert( value != NULL );\r
+\r
+ __asm__ __volatile__\r
+ (\r
+ " mov %[stored_flag], #1;" // move 1 into stored_flag\r
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)\r
+ "atomic_add:;"\r
+ " ldrex %[new_value], [%[value]]; " // load *value into new_value\r
+ " add %[new_value], #1;" // add 1 to new_value\r
+ " strex %[stored_flag], %[new_value], [%[value]];" // try to store new_value into *value (on success, strex puts 0 into stored_flag)\r
+ " teq %[stored_flag], #0;" // check if stored_flag is 0\r
+ " bne atomic_add;" // if not 0, retry (someone else touched *value after we loaded but before we stored)\r
+ " mcr p15, 0, %[zero], c7, c10, 5;" // memory barrier (ARM v6 compatible)\r
+\r
+ // output\r
+ : "+m" (*value), [new_value] "+&r" (new_value), [stored_flag] "+&r" (stored_flag)\r
+\r
+ // input\r
+ : [value] "r" (value), [zero] "r" (0)\r
+\r
+ // clobbered\r
+ : "cc", "memory" // memory is clobbered because we issue a memory barrier\r
+ );\r
+\r
+ return( new_value );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "lfds601_internal.h"\r
+\r
+/***** private prototypes *****/\r
+\r
--- /dev/null
+#include "lfds601_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_freelist_delete( struct lfds601_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ void\r
+ *user_data;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ while( lfds601_freelist_pop(fs, &fe) )\r
+ {\r
+ if( user_data_delete_function != NULL )\r
+ {\r
+ lfds601_freelist_get_user_data_from_element( fe, &user_data );\r
+ user_data_delete_function( user_data, user_state );\r
+ }\r
+\r
+ lfds601_abstraction_aligned_free( fe );\r
+ }\r
+\r
+ lfds601_abstraction_aligned_free( fs );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void *lfds601_freelist_get_user_data_from_element( struct lfds601_freelist_element *fe, void **user_data )\r
+{\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ if( user_data != NULL )\r
+ *user_data = fe->user_data;\r
+\r
+ return( fe->user_data );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_freelist_set_user_data_in_element( struct lfds601_freelist_element *fe, void *user_data )\r
+{\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ fe->user_data = user_data;\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "lfds601_internal.h"\r
+\r
+/***** defines *****/\r
+#define LFDS601_FREELIST_POINTER 0\r
+#define LFDS601_FREELIST_COUNTER 1\r
+#define LFDS601_FREELIST_PAC_SIZE 2\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS601_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds601_freelist_state\r
+{\r
+ struct lfds601_freelist_element\r
+ *volatile top[LFDS601_FREELIST_PAC_SIZE];\r
+\r
+ int\r
+ (*user_data_init_function)( void **user_data, void *user_state );\r
+\r
+ void\r
+ *user_state;\r
+\r
+ lfds601_atom_t\r
+ aba_counter,\r
+ element_count;\r
+};\r
+\r
+struct lfds601_freelist_element\r
+{\r
+ struct lfds601_freelist_element\r
+ *next[LFDS601_FREELIST_PAC_SIZE];\r
+\r
+ void\r
+ *user_data;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** private prototypes *****/\r
+lfds601_atom_t lfds601_freelist_internal_new_element( struct lfds601_freelist_state *fs, struct lfds601_freelist_element **fe );\r
+void lfds601_freelist_internal_validate( struct lfds601_freelist_state *fs, struct lfds601_validation_info *vi, enum lfds601_data_structure_validity *lfds601_freelist_validity );\r
+\r
--- /dev/null
+#include "lfds601_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_freelist_new( struct lfds601_freelist_state **fs, lfds601_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ lfds601_atom_t\r
+ element_count;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+ // TRD : user_data_init_function can be NULL\r
+\r
+ *fs = (struct lfds601_freelist_state *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_freelist_state), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( (*fs) != NULL )\r
+ {\r
+ (*fs)->top[LFDS601_FREELIST_POINTER] = NULL;\r
+ (*fs)->top[LFDS601_FREELIST_COUNTER] = 0;\r
+ (*fs)->user_data_init_function = user_data_init_function;\r
+ (*fs)->user_state = user_state;\r
+ (*fs)->aba_counter = 0;\r
+ (*fs)->element_count = 0;\r
+\r
+ element_count = lfds601_freelist_new_elements( *fs, number_elements );\r
+\r
+ if( element_count == number_elements )\r
+ rv = 1;\r
+\r
+ if( element_count != number_elements )\r
+ {\r
+ lfds601_abstraction_aligned_free( (*fs) );\r
+ *fs = NULL;\r
+ }\r
+ }\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+lfds601_atom_t lfds601_freelist_new_elements( struct lfds601_freelist_state *fs, lfds601_atom_t number_elements )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ lfds601_atom_t\r
+ loop,\r
+ count = 0;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+ // TRD : user_data_init_function can be NULL\r
+\r
+ for( loop = 0 ; loop < number_elements ; loop++ )\r
+ if( lfds601_freelist_internal_new_element(fs, &fe) )\r
+ {\r
+ lfds601_freelist_push( fs, fe );\r
+ count++;\r
+ }\r
+\r
+ return( count );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+lfds601_atom_t lfds601_freelist_internal_new_element( struct lfds601_freelist_state *fs, struct lfds601_freelist_element **fe )\r
+{\r
+ lfds601_atom_t\r
+ rv = 0;\r
+\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ /* TRD : basically, does what you'd expect;\r
+\r
+ allocates an element\r
+ calls the user init function\r
+ if anything fails, cleans up,\r
+ sets *fe to NULL\r
+ and returns 0\r
+ */\r
+\r
+ *fe = (struct lfds601_freelist_element *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_freelist_element), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *fe != NULL )\r
+ {\r
+ if( fs->user_data_init_function == NULL )\r
+ {\r
+ (*fe)->user_data = NULL;\r
+ rv = 1;\r
+ }\r
+\r
+ if( fs->user_data_init_function != NULL )\r
+ {\r
+ rv = fs->user_data_init_function( &(*fe)->user_data, fs->user_state );\r
+\r
+ if( rv == 0 )\r
+ {\r
+ lfds601_abstraction_aligned_free( *fe );\r
+ *fe = NULL;\r
+ }\r
+ }\r
+ }\r
+\r
+ if( rv == 1 )\r
+ lfds601_abstraction_increment( (lfds601_atom_t *) &fs->element_count );\r
+\r
+ return( rv );\r
+}\r
+\r
--- /dev/null
+#include "lfds601_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_freelist_element *lfds601_freelist_pop( struct lfds601_freelist_state *fs, struct lfds601_freelist_element **fe )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_freelist_element\r
+ *fe_local[LFDS601_FREELIST_PAC_SIZE];\r
+\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ fe_local[LFDS601_FREELIST_COUNTER] = fs->top[LFDS601_FREELIST_COUNTER];\r
+ fe_local[LFDS601_FREELIST_POINTER] = fs->top[LFDS601_FREELIST_POINTER];\r
+\r
+ /* TRD : note that lfds601_abstraction_dcas loads the original value of the destination (fs->top) into the compare (fe_local)\r
+ (this happens of course after the CAS itself has occurred inside lfds601_abstraction_dcas)\r
+ */\r
+\r
+ do\r
+ {\r
+ if( fe_local[LFDS601_FREELIST_POINTER] == NULL )\r
+ {\r
+ *fe = NULL;\r
+ return( *fe );\r
+ }\r
+ }\r
+ while( 0 == lfds601_abstraction_dcas((volatile lfds601_atom_t *) fs->top, (lfds601_atom_t *) fe_local[LFDS601_FREELIST_POINTER]->next, (lfds601_atom_t *) fe_local) );\r
+\r
+ *fe = (struct lfds601_freelist_element *) fe_local[LFDS601_FREELIST_POINTER];\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_freelist_element *lfds601_freelist_guaranteed_pop( struct lfds601_freelist_state *fs, struct lfds601_freelist_element **fe )\r
+{\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds601_freelist_internal_new_element( fs, fe );\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_freelist_push( struct lfds601_freelist_state *fs, struct lfds601_freelist_element *fe )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_freelist_element\r
+ *fe_local[LFDS601_FREELIST_PAC_SIZE],\r
+ *original_fe_next[LFDS601_FREELIST_PAC_SIZE];\r
+\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ fe_local[LFDS601_FREELIST_POINTER] = fe;\r
+ fe_local[LFDS601_FREELIST_COUNTER] = (struct lfds601_freelist_element *) lfds601_abstraction_increment( (lfds601_atom_t *) &fs->aba_counter );\r
+\r
+ original_fe_next[LFDS601_FREELIST_POINTER] = fs->top[LFDS601_FREELIST_POINTER];\r
+ original_fe_next[LFDS601_FREELIST_COUNTER] = fs->top[LFDS601_FREELIST_COUNTER];\r
+\r
+ /* TRD : note that lfds601_abstraction_dcas loads the original value of the destination (fs->top) into the compare (original_fe_next)\r
+ (this happens of course after the CAS itself has occurred inside lfds601_abstraction_dcas)\r
+ this then causes us in our loop, should we repeat it, to update fe_local->next to a more\r
+ up-to-date version of the head of the lfds601_freelist\r
+ */\r
+\r
+ do\r
+ {\r
+ fe_local[LFDS601_FREELIST_POINTER]->next[LFDS601_FREELIST_POINTER] = original_fe_next[LFDS601_FREELIST_POINTER];\r
+ fe_local[LFDS601_FREELIST_POINTER]->next[LFDS601_FREELIST_COUNTER] = original_fe_next[LFDS601_FREELIST_COUNTER];\r
+ }\r
+ while( 0 == lfds601_abstraction_dcas((volatile lfds601_atom_t *) fs->top, (lfds601_atom_t *) fe_local, (lfds601_atom_t *) original_fe_next) );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_freelist_query( struct lfds601_freelist_state *fs, enum lfds601_freelist_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( fs != NULL );\r
+ // TRD : query type can be any value in its range\r
+ // TRD : query_input can be NULL in some cases\r
+ assert( query_output != NULL );\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS601_FREELIST_QUERY_ELEMENT_COUNT:\r
+ assert( query_input == NULL );\r
+\r
+ *(lfds601_atom_t *) query_output = fs->element_count;\r
+ break;\r
+\r
+ case LFDS601_FREELIST_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ lfds601_freelist_internal_validate( fs, (struct lfds601_validation_info *) query_input, (enum lfds601_data_structure_validity *) query_output );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_freelist_internal_validate( struct lfds601_freelist_state *fs, struct lfds601_validation_info *vi, enum lfds601_data_structure_validity *lfds601_freelist_validity )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe,\r
+ *fe_slow,\r
+ *fe_fast;\r
+\r
+ lfds601_atom_t\r
+ element_count = 0;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( lfds601_freelist_validity != NULL );\r
+\r
+ *lfds601_freelist_validity = LFDS601_VALIDITY_VALID;\r
+\r
+ fe_slow = fe_fast = (struct lfds601_freelist_element *) fs->top[LFDS601_FREELIST_POINTER];\r
+\r
+ /* TRD : first, check for a loop\r
+ we have two pointers\r
+ both of which start at the top of the lfds601_freelist\r
+ we enter a loop\r
+ and on each iteration\r
+ we advance one pointer by one element\r
+ and the other by two\r
+\r
+ we exit the loop when both pointers are NULL\r
+ (have reached the end of the lfds601_freelist)\r
+\r
+ or\r
+\r
+ if the fast pointer 'sees' the slow pointer\r
+ which means we have a loop\r
+ */\r
+\r
+ if( fe_slow != NULL )\r
+ do\r
+ {\r
+ fe_slow = fe_slow->next[LFDS601_FREELIST_POINTER];\r
+\r
+ if( fe_fast != NULL )\r
+ fe_fast = fe_fast->next[LFDS601_FREELIST_POINTER];\r
+\r
+ if( fe_fast != NULL )\r
+ fe_fast = fe_fast->next[LFDS601_FREELIST_POINTER];\r
+ }\r
+ while( fe_slow != NULL and fe_fast != fe_slow );\r
+\r
+ if( fe_fast != NULL and fe_slow != NULL and fe_fast == fe_slow )\r
+ *lfds601_freelist_validity = LFDS601_VALIDITY_INVALID_LOOP;\r
+\r
+ /* TRD : now check for expected number of elements\r
+ vi can be NULL, in which case we do not check\r
+ we know we don't have a loop from our earlier check\r
+ */\r
+\r
+ if( *lfds601_freelist_validity == LFDS601_VALIDITY_VALID and vi != NULL )\r
+ {\r
+ fe = (struct lfds601_freelist_element *) fs->top[LFDS601_FREELIST_POINTER];\r
+\r
+ while( fe != NULL )\r
+ {\r
+ element_count++;\r
+ fe = (struct lfds601_freelist_element *) fe->next[LFDS601_FREELIST_POINTER];\r
+ }\r
+\r
+ if( element_count < vi->min_elements )\r
+ *lfds601_freelist_validity = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( element_count > vi->max_elements )\r
+ *lfds601_freelist_validity = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+/***** public prototypes *****/\r
+#include "liblfds601.h"\r
+\r
+/***** defines *****/\r
+#define and &&\r
+#define or ||\r
+\r
+#define RAISED 1\r
+#define LOWERED 0\r
+\r
+#define NO_FLAGS 0x0\r
+\r
--- /dev/null
+#include "lfds601_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_queue_delete( struct lfds601_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ void\r
+ *user_data;\r
+\r
+ assert( qs != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ while( lfds601_queue_dequeue(qs, &user_data) )\r
+ if( user_data_delete_function != NULL )\r
+ user_data_delete_function( user_data, user_state );\r
+\r
+ /* TRD : fully dequeuing will leave us\r
+ with a single dummy element\r
+ which both qs->enqueue and qs->dequeue point at\r
+ we push this back onto the lfds601_freelist\r
+ before we delete the lfds601_freelist\r
+ */\r
+\r
+ lfds601_freelist_push( qs->fs, qs->enqueue[LFDS601_QUEUE_POINTER]->fe );\r
+\r
+ lfds601_freelist_delete( qs->fs, lfds601_queue_internal_freelist_delete_function, NULL );\r
+\r
+ lfds601_abstraction_aligned_free( qs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds601_queue_internal_freelist_delete_function( void *user_data, void *user_state )\r
+{\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ lfds601_abstraction_aligned_free( user_data );\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "lfds601_internal.h"\r
+\r
+/***** pragmas *****/\r
+\r
+/***** defines *****/\r
+#define LFDS601_QUEUE_STATE_UNKNOWN -1\r
+#define LFDS601_QUEUE_STATE_EMPTY 0\r
+#define LFDS601_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE 1\r
+#define LFDS601_QUEUE_STATE_ATTEMPT_DELFDS601_QUEUE 2\r
+\r
+#define LFDS601_QUEUE_POINTER 0\r
+#define LFDS601_QUEUE_COUNTER 1\r
+#define LFDS601_QUEUE_PAC_SIZE 2\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS601_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds601_queue_state\r
+{\r
+ struct lfds601_queue_element\r
+ *volatile enqueue[LFDS601_QUEUE_PAC_SIZE],\r
+ *volatile dequeue[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ lfds601_atom_t\r
+ aba_counter;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+};\r
+\r
+struct lfds601_queue_element\r
+{\r
+ // TRD : next in a lfds601_queue requires volatile as it is target of CAS\r
+ struct lfds601_queue_element\r
+ *volatile next[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ void\r
+ *user_data;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** externs *****/\r
+\r
+/***** private prototypes *****/\r
+int lfds601_queue_internal_freelist_init_function( void **user_data, void *user_state );\r
+void lfds601_queue_internal_freelist_delete_function( void *user_data, void *user_state );\r
+\r
+void lfds601_queue_internal_new_element_from_freelist( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE], void *user_data );\r
+void lfds601_queue_internal_guaranteed_new_element_from_freelist( struct lfds601_queue_state *qs, struct lfds601_queue_element * qe[LFDS601_QUEUE_PAC_SIZE], void *user_data );\r
+void lfds601_queue_internal_init_element( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE], struct lfds601_freelist_element *fe, void *user_data );\r
+\r
+void lfds601_queue_internal_queue( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE] );\r
+\r
+void lfds601_queue_internal_validate( struct lfds601_queue_state *qs, struct lfds601_validation_info *vi, enum lfds601_data_structure_validity *lfds601_queue_validity, enum lfds601_data_structure_validity *lfds601_freelist_validity );\r
+\r
--- /dev/null
+#include "lfds601_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_queue_new( struct lfds601_queue_state **qs, lfds601_atom_t number_elements )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ struct lfds601_queue_element\r
+ *qe[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ assert( qs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+\r
+ *qs = (struct lfds601_queue_state *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_queue_state), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *qs != NULL )\r
+ {\r
+ // TRD : the size of the lfds601_freelist is the size of the lfds601_queue (+1 for the leading dummy element, which is hidden from the caller)\r
+ lfds601_freelist_new( &(*qs)->fs, number_elements+1, lfds601_queue_internal_freelist_init_function, NULL );\r
+\r
+ if( (*qs)->fs != NULL )\r
+ {\r
+ lfds601_queue_internal_new_element_from_freelist( *qs, qe, NULL );\r
+ (*qs)->enqueue[LFDS601_QUEUE_POINTER] = (*qs)->dequeue[LFDS601_QUEUE_POINTER] = qe[LFDS601_QUEUE_POINTER];\r
+ (*qs)->aba_counter = 0;\r
+ rv = 1;\r
+ }\r
+\r
+ if( (*qs)->fs == NULL )\r
+ {\r
+ lfds601_abstraction_aligned_free( *qs );\r
+ *qs = NULL;\r
+ }\r
+ }\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+int lfds601_queue_internal_freelist_init_function( void **user_data, void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ *user_data = lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_queue_element), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *user_data != NULL )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_queue_internal_new_element_from_freelist( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ qe[LFDS601_QUEUE_POINTER] = NULL;\r
+\r
+ lfds601_freelist_pop( qs->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ lfds601_queue_internal_init_element( qs, qe, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_queue_internal_guaranteed_new_element_from_freelist( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ qe[LFDS601_QUEUE_POINTER] = NULL;\r
+\r
+ lfds601_freelist_guaranteed_pop( qs->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ lfds601_queue_internal_init_element( qs, qe, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_queue_internal_init_element( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE], struct lfds601_freelist_element *fe, void *user_data )\r
+{\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds601_freelist_get_user_data_from_element( fe, (void **) &qe[LFDS601_QUEUE_POINTER] );\r
+ qe[LFDS601_QUEUE_COUNTER] = (struct lfds601_queue_element *) lfds601_abstraction_increment( (lfds601_atom_t *) &qs->aba_counter );\r
+\r
+ qe[LFDS601_QUEUE_POINTER]->next[LFDS601_QUEUE_POINTER] = NULL;\r
+ qe[LFDS601_QUEUE_POINTER]->next[LFDS601_QUEUE_COUNTER] = (struct lfds601_queue_element *) lfds601_abstraction_increment( (lfds601_atom_t *) &qs->aba_counter );\r
+\r
+ qe[LFDS601_QUEUE_POINTER]->fe = fe;\r
+ qe[LFDS601_QUEUE_POINTER]->user_data = user_data;\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds601_queue_query( struct lfds601_queue_state *qs, enum lfds601_queue_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( qs != NULL );\r
+ // TRD : query_type can be any value in its range\r
+ // TRD : query_input can be NULL\r
+ assert( query_output != NULL );\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS601_QUEUE_QUERY_ELEMENT_COUNT:\r
+ assert( query_input == NULL );\r
+\r
+ lfds601_freelist_query( qs->fs, LFDS601_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );\r
+ break;\r
+\r
+ case LFDS601_QUEUE_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ lfds601_queue_internal_validate( qs, (struct lfds601_validation_info *) query_input, (enum lfds601_data_structure_validity *) query_output, ((enum lfds601_data_structure_validity *) query_output)+1 );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_queue_internal_validate( struct lfds601_queue_state *qs, struct lfds601_validation_info *vi, enum lfds601_data_structure_validity *lfds601_queue_validity, enum lfds601_data_structure_validity *lfds601_freelist_validity )\r
+{\r
+ struct lfds601_queue_element\r
+ *qe,\r
+ *qe_slow,\r
+ *qe_fast;\r
+\r
+ lfds601_atom_t\r
+ element_count = 0,\r
+ total_elements;\r
+\r
+ struct lfds601_validation_info\r
+ lfds601_freelist_vi;\r
+\r
+ assert( qs != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( lfds601_queue_validity != NULL );\r
+ assert( lfds601_freelist_validity != NULL );\r
+\r
+ *lfds601_queue_validity = LFDS601_VALIDITY_VALID;\r
+\r
+ qe_slow = qe_fast = (struct lfds601_queue_element *) qs->dequeue[LFDS601_QUEUE_POINTER];\r
+\r
+ /* TRD : first, check for a loop\r
+ we have two pointers\r
+ both of which start at the dequeue end of the lfds601_queue\r
+ we enter a loop\r
+ and on each iteration\r
+ we advance one pointer by one element\r
+ and the other by two\r
+\r
+ we exit the loop when both pointers are NULL\r
+ (have reached the end of the lfds601_queue)\r
+\r
+ or\r
+\r
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop\r
+ */\r
+\r
+ if( qe_slow != NULL )\r
+ do\r
+ {\r
+ qe_slow = qe_slow->next[LFDS601_QUEUE_POINTER];\r
+\r
+ if( qe_fast != NULL )\r
+ qe_fast = qe_fast->next[LFDS601_QUEUE_POINTER];\r
+\r
+ if( qe_fast != NULL )\r
+ qe_fast = qe_fast->next[LFDS601_QUEUE_POINTER];\r
+ }\r
+ while( qe_slow != NULL and qe_fast != qe_slow );\r
+\r
+ if( qe_fast != NULL and qe_slow != NULL and qe_fast == qe_slow )\r
+ *lfds601_queue_validity = LFDS601_VALIDITY_INVALID_LOOP;\r
+\r
+ /* TRD : now check for expected number of elements\r
+ vi can be NULL, in which case we do not check\r
+ we know we don't have a loop from our earlier check\r
+ */\r
+\r
+ if( *lfds601_queue_validity == LFDS601_VALIDITY_VALID and vi != NULL )\r
+ {\r
+ qe = (struct lfds601_queue_element *) qs->dequeue[LFDS601_QUEUE_POINTER];\r
+\r
+ while( qe != NULL )\r
+ {\r
+ element_count++;\r
+ qe = (struct lfds601_queue_element *) qe->next[LFDS601_QUEUE_POINTER];\r
+ }\r
+\r
+ /* TRD : remember there is a dummy element in the lfds601_queue */\r
+ element_count--;\r
+\r
+ if( element_count < vi->min_elements )\r
+ *lfds601_queue_validity = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( element_count > vi->max_elements )\r
+ *lfds601_queue_validity = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ /* TRD : now we validate the lfds601_freelist\r
+\r
+ we may be able to check for the expected number of\r
+ elements in the lfds601_freelist\r
+\r
+ if the caller has given us an expected min and max\r
+ number of elements in the lfds601_queue, then the total number\r
+ of elements in the lfds601_freelist, minus that min and max,\r
+ gives us the expected number of elements in the\r
+ lfds601_freelist\r
+ */\r
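+
+ /* TRD : for example, a lfds601_queue created with 10 elements has 11
+ lfds601_freelist elements in total (10 usable, plus the dummy);
+ total_elements below is therefore 10 once the dummy is subtracted,
+ so if the caller expects between 2 and 5 elements in the lfds601_queue,
+ the lfds601_freelist is expected to hold between 10-5 = 5 and
+ 10-2 = 8 elements
+ */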
+\r
+ if( vi != NULL )\r
+ {\r
+ lfds601_freelist_query( qs->fs, LFDS601_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );\r
+\r
+ /* TRD : remember there is a dummy element in the lfds601_queue */\r
+ total_elements--;\r
+\r
+ lfds601_freelist_vi.min_elements = total_elements - vi->max_elements;\r
+ lfds601_freelist_vi.max_elements = total_elements - vi->min_elements;\r
+\r
+ lfds601_freelist_query( qs->fs, LFDS601_FREELIST_QUERY_VALIDATE, (void *) &lfds601_freelist_vi, (void *) lfds601_freelist_validity );\r
+ }\r
+\r
+ if( vi == NULL )\r
+ lfds601_freelist_query( qs->fs, LFDS601_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds601_freelist_validity );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_queue_enqueue( struct lfds601_queue_state *qs, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_queue_element\r
+ *qe[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ assert( qs != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ lfds601_queue_internal_new_element_from_freelist( qs, qe, user_data );\r
+\r
+ if( qe[LFDS601_QUEUE_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds601_queue_internal_queue( qs, qe );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_queue_guaranteed_enqueue( struct lfds601_queue_state *qs, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_queue_element\r
+ *qe[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ assert( qs != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ lfds601_queue_internal_guaranteed_new_element_from_freelist( qs, qe, user_data );\r
+\r
+ if( qe[LFDS601_QUEUE_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds601_queue_internal_queue( qs, qe );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_queue_internal_queue( struct lfds601_queue_state *qs, struct lfds601_queue_element *qe[LFDS601_QUEUE_PAC_SIZE] )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_queue_element\r
+ *enqueue[LFDS601_QUEUE_PAC_SIZE],\r
+ *next[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ unsigned char\r
+ cas_result = 0;\r
+\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+\r
+ do\r
+ {\r
+ enqueue[LFDS601_QUEUE_POINTER] = qs->enqueue[LFDS601_QUEUE_POINTER];\r
+ enqueue[LFDS601_QUEUE_COUNTER] = qs->enqueue[LFDS601_QUEUE_COUNTER];\r
+\r
+ next[LFDS601_QUEUE_POINTER] = enqueue[LFDS601_QUEUE_POINTER]->next[LFDS601_QUEUE_POINTER];\r
+ next[LFDS601_QUEUE_COUNTER] = enqueue[LFDS601_QUEUE_POINTER]->next[LFDS601_QUEUE_COUNTER];\r
+\r
+ /* TRD : this if() ensures that the next we read, just above,\r
+ really is from qs->enqueue (which we copied into enqueue)\r
+ */\r
+\r
+ if( qs->enqueue[LFDS601_QUEUE_POINTER] == enqueue[LFDS601_QUEUE_POINTER] and qs->enqueue[LFDS601_QUEUE_COUNTER] == enqueue[LFDS601_QUEUE_COUNTER] )\r
+ {\r
+ if( next[LFDS601_QUEUE_POINTER] == NULL )\r
+ {\r
+ qe[LFDS601_QUEUE_COUNTER] = next[LFDS601_QUEUE_COUNTER] + 1;\r
+ cas_result = lfds601_abstraction_dcas( (volatile lfds601_atom_t *) enqueue[LFDS601_QUEUE_POINTER]->next, (lfds601_atom_t *) qe, (lfds601_atom_t *) next );\r
+ }\r
+ else\r
+ {\r
+ next[LFDS601_QUEUE_COUNTER] = enqueue[LFDS601_QUEUE_COUNTER] + 1;\r
+ lfds601_abstraction_dcas( (volatile lfds601_atom_t *) qs->enqueue, (lfds601_atom_t *) next, (lfds601_atom_t *) enqueue );\r
+ }\r
+ }\r
+ }\r
+ while( cas_result == 0 );\r
+\r
+ qe[LFDS601_QUEUE_COUNTER] = enqueue[LFDS601_QUEUE_COUNTER] + 1;\r
+ lfds601_abstraction_dcas( (volatile lfds601_atom_t *) qs->enqueue, (lfds601_atom_t *) qe, (lfds601_atom_t *) enqueue );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_queue_dequeue( struct lfds601_queue_state *qs, void **user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_queue_element\r
+ *enqueue[LFDS601_QUEUE_PAC_SIZE],\r
+ *dequeue[LFDS601_QUEUE_PAC_SIZE],\r
+ *next[LFDS601_QUEUE_PAC_SIZE];\r
+\r
+ unsigned char\r
+ cas_result = 0;\r
+\r
+ int\r
+ rv = 1,\r
+ state = LFDS601_QUEUE_STATE_UNKNOWN,\r
+ finished_flag = LOWERED;\r
+\r
+ assert( qs != NULL );\r
+ assert( user_data != NULL );\r
+\r
+ do\r
+ {\r
+ dequeue[LFDS601_QUEUE_POINTER] = qs->dequeue[LFDS601_QUEUE_POINTER];\r
+ dequeue[LFDS601_QUEUE_COUNTER] = qs->dequeue[LFDS601_QUEUE_COUNTER];\r
+\r
+ enqueue[LFDS601_QUEUE_POINTER] = qs->enqueue[LFDS601_QUEUE_POINTER];\r
+ enqueue[LFDS601_QUEUE_COUNTER] = qs->enqueue[LFDS601_QUEUE_COUNTER];\r
+\r
+ next[LFDS601_QUEUE_POINTER] = dequeue[LFDS601_QUEUE_POINTER]->next[LFDS601_QUEUE_POINTER];\r
+ next[LFDS601_QUEUE_COUNTER] = dequeue[LFDS601_QUEUE_POINTER]->next[LFDS601_QUEUE_COUNTER];\r
+\r
+ /* TRD : confirm that dequeue didn't move between reading it\r
+ and reading its next pointer\r
+ */\r
+\r
+ if( dequeue[LFDS601_QUEUE_POINTER] == qs->dequeue[LFDS601_QUEUE_POINTER] and dequeue[LFDS601_QUEUE_COUNTER] == qs->dequeue[LFDS601_QUEUE_COUNTER] )\r
+ {\r
+ if( enqueue[LFDS601_QUEUE_POINTER] == dequeue[LFDS601_QUEUE_POINTER] and next[LFDS601_QUEUE_POINTER] == NULL )\r
+ state = LFDS601_QUEUE_STATE_EMPTY;\r
+\r
+ if( enqueue[LFDS601_QUEUE_POINTER] == dequeue[LFDS601_QUEUE_POINTER] and next[LFDS601_QUEUE_POINTER] != NULL )\r
+ state = LFDS601_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE;\r
+\r
+ if( enqueue[LFDS601_QUEUE_POINTER] != dequeue[LFDS601_QUEUE_POINTER] )\r
+ state = LFDS601_QUEUE_STATE_ATTEMPT_DELFDS601_QUEUE;\r
+\r
+ switch( state )\r
+ {\r
+ case LFDS601_QUEUE_STATE_EMPTY:\r
+ *user_data = NULL;\r
+ rv = 0;\r
+ finished_flag = RAISED;\r
+ break;\r
+\r
+ case LFDS601_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE:\r
+ next[LFDS601_QUEUE_COUNTER] = enqueue[LFDS601_QUEUE_COUNTER] + 1;\r
+ lfds601_abstraction_dcas( (volatile lfds601_atom_t *) qs->enqueue, (lfds601_atom_t *) next, (lfds601_atom_t *) enqueue );\r
+ break;\r
+\r
+ case LFDS601_QUEUE_STATE_ATTEMPT_DELFDS601_QUEUE:\r
+ *user_data = next[LFDS601_QUEUE_POINTER]->user_data;\r
+\r
+ next[LFDS601_QUEUE_COUNTER] = dequeue[LFDS601_QUEUE_COUNTER] + 1;\r
+ cas_result = lfds601_abstraction_dcas( (volatile lfds601_atom_t *) qs->dequeue, (lfds601_atom_t *) next, (lfds601_atom_t *) dequeue );\r
+\r
+ if( cas_result == 1 )\r
+ finished_flag = RAISED;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ while( finished_flag == LOWERED );\r
+\r
+ if( cas_result == 1 )\r
+ lfds601_freelist_push( qs->fs, dequeue[LFDS601_QUEUE_POINTER]->fe );\r
+\r
+ return( rv );\r
+}\r
+\r
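+/* TRD : illustrative usage sketch only - not part of the library build;
+        a minimal single-threaded example of the public lfds601_queue API
+        used above (lfds601_queue_new, lfds601_queue_enqueue,
+        lfds601_queue_dequeue, lfds601_queue_delete); the element count
+        and user data values are arbitrary, and example_queue_usage is
+        a hypothetical caller
+
+  #include "liblfds601.h"
+
+  int example_queue_usage( void )
+  {
+    struct lfds601_queue_state
+      *qs;
+
+    void
+      *user_data;
+
+    if( !lfds601_queue_new(&qs, 10) )
+      return( 0 );
+
+    // TRD : enqueue returns 0 only if no freelist element is available
+    lfds601_queue_enqueue( qs, (void *) 1 );
+    lfds601_queue_enqueue( qs, (void *) 2 );
+
+    // TRD : dequeue returns 0 once the queue is empty
+    while( lfds601_queue_dequeue(qs, &user_data) )
+      ;
+
+    lfds601_queue_delete( qs, NULL, NULL );
+
+    return( 1 );
+  }
+*/
+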
--- /dev/null
+#include "lfds601_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_ringbuffer_delete( struct lfds601_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ assert( rs != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ lfds601_queue_delete( rs->qs, NULL, NULL );\r
+\r
+ lfds601_freelist_delete( rs->fs, user_data_delete_function, user_state );\r
+\r
+ lfds601_abstraction_aligned_free( rs );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_freelist_element *lfds601_ringbuffer_get_read_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element **fe )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds601_queue_dequeue( rs->qs, (void **) fe );\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_freelist_element *lfds601_ringbuffer_get_write_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element **fe, int *overwrite_flag )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+ // TRD : overwrite_flag can be NULL\r
+\r
+ /* TRD : we try to obtain an element from the lfds601_freelist\r
+ if we can, we populate it and add it to the lfds601_queue\r
+\r
+ if we cannot, then the lfds601_ringbuffer is full\r
+ so instead we grab the current read element and\r
+ use that instead\r
+\r
+ dequeue may fail since the lfds601_queue may be emptied\r
+ during our dequeue attempt\r
+\r
+ so what we actually do here is a loop, attempting\r
+ the lfds601_freelist and if it fails then a dequeue, until\r
+ we obtain an element\r
+\r
+ once we have an element, we lfds601_queue it\r
+\r
+ you may be wondering why this operation is in a loop\r
+ remember - these operations are lock-free; anything\r
+ can happen in between\r
+\r
+ so for example the pop could fail because the lfds601_freelist\r
+ is empty; but by the time we go to get an element from\r
+ the lfds601_queue, the whole lfds601_queue has been emptied back into\r
+ the lfds601_freelist!\r
+\r
+ if overwrite_flag is provided, we set it to 0 if we\r
+ obtained a new element from the lfds601_freelist, 1 if we\r
+ stole an element from the lfds601_queue\r
+ */\r
+\r
+ do\r
+ {\r
+ if( overwrite_flag != NULL )\r
+ *overwrite_flag = 0;\r
+\r
+ lfds601_freelist_pop( rs->fs, fe );\r
+\r
+ if( *fe == NULL )\r
+ {\r
+ lfds601_ringbuffer_get_read_element( rs, fe );\r
+\r
+ if( overwrite_flag != NULL and *fe != NULL )\r
+ *overwrite_flag = 1;\r
+ }\r
+ }\r
+ while( *fe == NULL );\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_ringbuffer_put_read_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element *fe )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds601_freelist_push( rs->fs, fe );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_ringbuffer_put_write_element( struct lfds601_ringbuffer_state *rs, struct lfds601_freelist_element *fe )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds601_queue_enqueue( rs->qs, fe );\r
+\r
+ return;\r
+}\r
+\r
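+/* TRD : illustrative usage sketch only - not part of the library build;
+        a minimal single-threaded example of the read and write element
+        functions above; the init/delete functions, the 64 byte buffer
+        size and the payload are assumptions made for the sketch, and
+        example_ringbuffer_usage is a hypothetical caller
+
+  #include <stdlib.h>
+  #include <string.h>
+  #include "liblfds601.h"
+
+  static int example_init_function( void **user_data, void *user_state )
+  {
+    // TRD : each ringbuffer element carries a 64 byte buffer
+    *user_data = malloc( 64 );
+    return( *user_data != NULL );
+  }
+
+  static void example_delete_function( void *user_data, void *user_state )
+  {
+    free( user_data );
+  }
+
+  int example_ringbuffer_usage( void )
+  {
+    struct lfds601_ringbuffer_state
+      *rs;
+
+    struct lfds601_freelist_element
+      *fe;
+
+    void
+      *buffer;
+
+    int
+      overwrite_flag;
+
+    if( !lfds601_ringbuffer_new(&rs, 10, example_init_function, NULL) )
+      return( 0 );
+
+    // TRD : write path - obtain an element, fill its buffer, enqueue it;
+    //       overwrite_flag is set to 1 if an unread element was stolen
+    lfds601_ringbuffer_get_write_element( rs, &fe, &overwrite_flag );
+    lfds601_freelist_get_user_data_from_element( fe, &buffer );
+    strcpy( (char *) buffer, "hello" );
+    lfds601_ringbuffer_put_write_element( rs, fe );
+
+    // TRD : read path - obtain the element, use its buffer, return it
+    if( lfds601_ringbuffer_get_read_element(rs, &fe) != NULL )
+    {
+      lfds601_freelist_get_user_data_from_element( fe, &buffer );
+      lfds601_ringbuffer_put_read_element( rs, fe );
+    }
+
+    lfds601_ringbuffer_delete( rs, example_delete_function, NULL );
+
+    return( 1 );
+  }
+*/
+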
--- /dev/null
+/***** the library wide include file *****/\r
+#include "lfds601_internal.h"\r
+\r
+/***** defines *****/\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS601_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds601_ringbuffer_state\r
+{\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** externs *****/\r
+\r
+/***** private prototypes *****/\r
+void lfds601_ringbuffer_internal_validate( struct lfds601_ringbuffer_state *rs, struct lfds601_validation_info *vi, enum lfds601_data_structure_validity *lfds601_queue_validity, enum lfds601_data_structure_validity *lfds601_freelist_validity );\r
+\r
--- /dev/null
+#include "lfds601_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_ringbuffer_new( struct lfds601_ringbuffer_state **rs, lfds601_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( rs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+ // TRD : user_data_init_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ *rs = (struct lfds601_ringbuffer_state *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_ringbuffer_state), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *rs != NULL )\r
+ {\r
+ lfds601_freelist_new( &(*rs)->fs, number_elements, user_data_init_function, user_state );\r
+\r
+ if( (*rs)->fs != NULL )\r
+ {\r
+ lfds601_queue_new( &(*rs)->qs, number_elements );\r
+\r
+ if( (*rs)->qs != NULL )\r
+ rv = 1;\r
+\r
+ if( (*rs)->qs == NULL )\r
+ {\r
+ lfds601_abstraction_aligned_free( *rs );\r
+ *rs = NULL;\r
+ }\r
+ }\r
+\r
+ if( (*rs)->fs == NULL )\r
+ {\r
+ lfds601_abstraction_aligned_free( *rs );\r
+ *rs = NULL;\r
+ }\r
+ }\r
+\r
+ return( rv );\r
+}\r
+\r
--- /dev/null
+#include "lfds601_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds601_ringbuffer_query( struct lfds601_ringbuffer_state *rs, enum lfds601_ringbuffer_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( rs != NULL );\r
+ // TRD : query_type can be any value in its range\r
+ // TRD : query_input can be NULL\r
+ assert( query_output != NULL );\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS601_RINGBUFFER_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ lfds601_ringbuffer_internal_validate( rs, (struct lfds601_validation_info *) query_input, (enum lfds601_data_structure_validity *) query_output, ((enum lfds601_data_structure_validity *) query_output)+2 );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_ringbuffer_internal_validate( struct lfds601_ringbuffer_state *rs, struct lfds601_validation_info *vi, enum lfds601_data_structure_validity *lfds601_queue_validity, enum lfds601_data_structure_validity *lfds601_freelist_validity )\r
+{\r
+ assert( rs != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( lfds601_queue_validity != NULL );\r
+ assert( lfds601_freelist_validity != NULL );\r
+\r
+ lfds601_queue_query( rs->qs, LFDS601_QUEUE_QUERY_VALIDATE, vi, lfds601_queue_validity );\r
+\r
+ if( vi != NULL )\r
+ {\r
+ struct lfds601_validation_info\r
+ lfds601_freelist_vi;\r
+\r
+ lfds601_atom_t\r
+ total_elements;\r
+\r
+ lfds601_freelist_query( rs->fs, LFDS601_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );\r
+ lfds601_freelist_vi.min_elements = total_elements - vi->max_elements;\r
+ lfds601_freelist_vi.max_elements = total_elements - vi->min_elements;\r
+ lfds601_freelist_query( rs->fs, LFDS601_FREELIST_QUERY_VALIDATE, (void *) &lfds601_freelist_vi, (void *) lfds601_freelist_validity );\r
+ }\r
+\r
+ if( vi == NULL )\r
+ lfds601_freelist_query( rs->fs, LFDS601_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds601_freelist_validity );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_delete( struct lfds601_slist_state *ss )\r
+{\r
+ lfds601_slist_delete_all_elements( ss );\r
+\r
+ lfds601_abstraction_aligned_free( ss );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_delete_element( struct lfds601_slist_state *ss, struct lfds601_slist_element *se )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) void\r
+ *volatile user_data_and_flags[2],\r
+ *volatile new_user_data_and_flags[2];\r
+\r
+ unsigned char\r
+ cas_rv = 0;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ user_data_and_flags[LFDS601_SLIST_USER_DATA] = se->user_data_and_flags[LFDS601_SLIST_USER_DATA];\r
+ user_data_and_flags[LFDS601_SLIST_FLAGS] = se->user_data_and_flags[LFDS601_SLIST_FLAGS];\r
+\r
+ do\r
+ {\r
+ new_user_data_and_flags[LFDS601_SLIST_USER_DATA] = user_data_and_flags[LFDS601_SLIST_USER_DATA];\r
+ new_user_data_and_flags[LFDS601_SLIST_FLAGS] = (void *) ((lfds601_atom_t) user_data_and_flags[LFDS601_SLIST_FLAGS] | LFDS601_SLIST_FLAG_DELETED);\r
+ }\r
+ while( !((lfds601_atom_t) user_data_and_flags[LFDS601_SLIST_FLAGS] & LFDS601_SLIST_FLAG_DELETED) and 0 == (cas_rv = lfds601_abstraction_dcas((volatile lfds601_atom_t *) se->user_data_and_flags, (lfds601_atom_t *) new_user_data_and_flags, (lfds601_atom_t *) user_data_and_flags)) );\r
+\r
+ if( cas_rv == 1 )\r
+ if( ss->user_data_delete_function != NULL )\r
+ ss->user_data_delete_function( (void *) user_data_and_flags[LFDS601_SLIST_USER_DATA], ss->user_state );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_delete_all_elements( struct lfds601_slist_state *ss )\r
+{\r
+ struct lfds601_slist_element\r
+ *volatile se,\r
+ *volatile se_temp;\r
+\r
+ se = ss->head;\r
+\r
+ while( se != NULL )\r
+ {\r
+ // TRD : if a non-deleted element and there is a delete function, call the delete function\r
+ if( !((lfds601_atom_t) se->user_data_and_flags[LFDS601_SLIST_FLAGS] & LFDS601_SLIST_FLAG_DELETED) and ss->user_data_delete_function != NULL )
+ ss->user_data_delete_function( (void *) se->user_data_and_flags[LFDS601_SLIST_USER_DATA], ss->user_state );\r
+\r
+ se_temp = se;\r
+ se = se->next;\r
+ lfds601_abstraction_aligned_free( (void *) se_temp );\r
+ }\r
+\r
+ lfds601_slist_internal_init_slist( ss, ss->user_data_delete_function, ss->user_state );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_slist_get_user_data_from_element( struct lfds601_slist_element *se, void **user_data )\r
+{\r
+ int\r
+ rv = 1;\r
+\r
+ assert( se != NULL );\r
+ assert( user_data != NULL );\r
+\r
+ *user_data = (void *) se->user_data_and_flags[LFDS601_SLIST_USER_DATA];\r
+\r
+ if( (lfds601_atom_t) se->user_data_and_flags[LFDS601_SLIST_FLAGS] & LFDS601_SLIST_FLAG_DELETED )\r
+ rv = 0;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_slist_set_user_data_in_element( struct lfds601_slist_element *se, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) volatile void\r
+ *user_data_and_flags[2],\r
+ *new_user_data_and_flags[2];\r
+\r
+ int\r
+ rv = 1;\r
+\r
+ assert( se != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ user_data_and_flags[LFDS601_SLIST_USER_DATA] = se->user_data_and_flags[LFDS601_SLIST_USER_DATA];\r
+ user_data_and_flags[LFDS601_SLIST_FLAGS] = se->user_data_and_flags[LFDS601_SLIST_FLAGS];\r
+\r
+ new_user_data_and_flags[LFDS601_SLIST_USER_DATA] = user_data;\r
+\r
+ do\r
+ {\r
+ new_user_data_and_flags[LFDS601_SLIST_FLAGS] = user_data_and_flags[LFDS601_SLIST_FLAGS];\r
+ }\r
+ while( !((lfds601_atom_t) user_data_and_flags[LFDS601_SLIST_FLAGS] & LFDS601_SLIST_FLAG_DELETED) and 0 == lfds601_abstraction_dcas((volatile lfds601_atom_t *) se->user_data_and_flags, (lfds601_atom_t *) new_user_data_and_flags, (lfds601_atom_t *) user_data_and_flags) );\r
+\r
+ if( (lfds601_atom_t) user_data_and_flags[LFDS601_SLIST_FLAGS] & LFDS601_SLIST_FLAG_DELETED )\r
+ rv = 0;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_slist_element *lfds601_slist_get_head( struct lfds601_slist_state *ss, struct lfds601_slist_element **se )\r
+{\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ *se = (struct lfds601_slist_element *) ss->head;\r
+\r
+ lfds601_slist_internal_move_to_first_undeleted_element( se );\r
+\r
+ return( *se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_slist_element *lfds601_slist_get_next( struct lfds601_slist_element *se, struct lfds601_slist_element **next_se )\r
+{\r
+ assert( se != NULL );\r
+ assert( next_se != NULL );\r
+\r
+ *next_se = (struct lfds601_slist_element *) se->next;\r
+\r
+ lfds601_slist_internal_move_to_first_undeleted_element( next_se );\r
+\r
+ return( *next_se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_slist_element *lfds601_slist_get_head_and_then_next( struct lfds601_slist_state *ss, struct lfds601_slist_element **se )\r
+{\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ if( *se == NULL )\r
+ lfds601_slist_get_head( ss, se );\r
+ else\r
+ lfds601_slist_get_next( *se, se );\r
+\r
+ return( *se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_internal_move_to_first_undeleted_element( struct lfds601_slist_element **se )\r
+{\r
+ assert( se != NULL );\r
+\r
+ while( *se != NULL and (lfds601_atom_t) (*se)->user_data_and_flags[LFDS601_SLIST_FLAGS] & LFDS601_SLIST_FLAG_DELETED )\r
+ (*se) = (struct lfds601_slist_element *) (*se)->next;\r
+\r
+ return;\r
+}\r
+\r
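+/* TRD : illustrative usage sketch only - not part of the library build;
+        a minimal single-threaded example of creating, walking and
+        deleting an slist with the functions above; the element values
+        are arbitrary and example_slist_usage is a hypothetical caller
+
+  #include "liblfds601.h"
+
+  int example_slist_usage( void )
+  {
+    struct lfds601_slist_state
+      *ss;
+
+    struct lfds601_slist_element
+      *se;
+
+    void
+      *user_data;
+
+    if( !lfds601_slist_new(&ss, NULL, NULL) )
+      return( 0 );
+
+    // TRD : new_head links at the head; new_next links after an element
+    se = lfds601_slist_new_head( ss, (void *) 1 );
+
+    if( se != NULL )
+      lfds601_slist_new_next( se, (void *) 2 );
+
+    // TRD : iteration - se must start as NULL; get_head_and_then_next
+    //       then walks the undeleted elements, returning NULL at the end
+    se = NULL;
+
+    while( lfds601_slist_get_head_and_then_next(ss, &se) != NULL )
+      lfds601_slist_get_user_data_from_element( se, &user_data );
+
+    lfds601_slist_delete( ss );
+
+    return( 1 );
+  }
+*/
+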
--- /dev/null
+/***** the library wide include file *****/\r
+#include "lfds601_internal.h"\r
+\r
+/***** defines *****/\r
+#define LFDS601_SLIST_USER_DATA 0\r
+#define LFDS601_SLIST_FLAGS 1\r
+\r
+#define LFDS601_SLIST_NO_FLAGS 0x0\r
+#define LFDS601_SLIST_FLAG_DELETED 0x1\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS601_ALIGN_SINGLE_POINTER )\r
+\r
+struct lfds601_slist_state\r
+{\r
+ struct lfds601_slist_element\r
+ *volatile head;\r
+\r
+ void\r
+ (*user_data_delete_function)( void *user_data, void *user_state ),\r
+ *user_state;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+#pragma pack( push, LFDS601_ALIGN_DOUBLE_POINTER )\r
+\r
+/* TRD : this pragma pack doesn't seem to work under Windows\r
+ if the structure members are the correct way round\r
+ (next first), then user_data_and_flags ends up on\r
+ a single pointer boundary and DCAS crashes\r
+\r
+ accordingly, I've moved user_data_and_flags first\r
+*/\r
+\r
+struct lfds601_slist_element\r
+{\r
+ void\r
+ *volatile user_data_and_flags[2];\r
+\r
+ // TRD : requires volatile as is target of CAS\r
+ struct lfds601_slist_element\r
+ *volatile next;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** private prototypes *****/\r
+void lfds601_slist_internal_init_slist( struct lfds601_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+void lfds601_slist_internal_link_element_to_head( struct lfds601_slist_state *lfds601_slist_state, struct lfds601_slist_element *volatile se );\r
+void lfds601_slist_internal_link_element_after_element( struct lfds601_slist_element *volatile lfds601_slist_in_list_element, struct lfds601_slist_element *volatile se );\r
+\r
+void lfds601_slist_internal_move_to_first_undeleted_element( struct lfds601_slist_element **se );\r
+\r
--- /dev/null
+#include "lfds601_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_internal_link_element_to_head( struct lfds601_slist_state *ss, struct lfds601_slist_element *volatile se )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_SINGLE_POINTER) struct lfds601_slist_element\r
+ *se_next;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ se_next = ss->head;\r
+\r
+ do\r
+ {\r
+ se->next = se_next;\r
+ }\r
+ while( se->next != (se_next = (struct lfds601_slist_element *) lfds601_abstraction_cas((volatile lfds601_atom_t *) &ss->head, (lfds601_atom_t) se, (lfds601_atom_t) se->next)) );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_internal_link_element_after_element( struct lfds601_slist_element *volatile lfds601_slist_in_list_element, struct lfds601_slist_element *volatile se )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_SINGLE_POINTER) struct lfds601_slist_element\r
+ *se_prev,\r
+ *se_next;\r
+\r
+ assert( lfds601_slist_in_list_element != NULL );\r
+ assert( se != NULL );\r
+\r
+ se_prev = (struct lfds601_slist_element *) lfds601_slist_in_list_element;\r
+\r
+ se_next = se_prev->next;\r
+\r
+ do\r
+ {\r
+ se->next = se_next;\r
+ }\r
+ while( se->next != (se_next = (struct lfds601_slist_element *) lfds601_abstraction_cas((volatile lfds601_atom_t *) &se_prev->next, (lfds601_atom_t) se, (lfds601_atom_t) se->next)) );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_slist_new( struct lfds601_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ *ss = (struct lfds601_slist_state *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_slist_state), LFDS601_ALIGN_SINGLE_POINTER );\r
+\r
+ if( *ss != NULL )\r
+ {\r
+ lfds601_slist_internal_init_slist( *ss, user_data_delete_function, user_state );\r
+ rv = 1;\r
+ }\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_slist_internal_init_slist( struct lfds601_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ assert( ss != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ ss->head = NULL;\r
+ ss->user_data_delete_function = user_data_delete_function;\r
+ ss->user_state = user_state;\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_slist_element *lfds601_slist_new_head( struct lfds601_slist_state *ss, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_SINGLE_POINTER) struct lfds601_slist_element\r
+ *volatile se;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ se = (struct lfds601_slist_element *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_slist_element), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( se != NULL )\r
+ {\r
+ se->user_data_and_flags[LFDS601_SLIST_USER_DATA] = user_data;\r
+ se->user_data_and_flags[LFDS601_SLIST_FLAGS] = LFDS601_SLIST_NO_FLAGS;\r
+\r
+ lfds601_slist_internal_link_element_to_head( ss, se );\r
+ }\r
+\r
+ return( (struct lfds601_slist_element *) se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds601_slist_element *lfds601_slist_new_next( struct lfds601_slist_element *se, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_SINGLE_POINTER) struct lfds601_slist_element\r
+ *volatile se_next;\r
+\r
+ assert( se != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ se_next = (struct lfds601_slist_element *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_slist_element), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( se_next != NULL )\r
+ {\r
+ se_next->user_data_and_flags[LFDS601_SLIST_USER_DATA] = user_data;\r
+ se_next->user_data_and_flags[LFDS601_SLIST_FLAGS] = LFDS601_SLIST_NO_FLAGS;\r
+\r
+ lfds601_slist_internal_link_element_after_element( se, se_next );\r
+ }\r
+\r
+ return( (struct lfds601_slist_element *) se_next );\r
+}\r
+\r
--- /dev/null
+#include "lfds601_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_stack_delete( struct lfds601_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ void\r
+ *user_data;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ while( lfds601_stack_pop(ss, &user_data) )\r
+ if( user_data_delete_function != NULL )\r
+ user_data_delete_function( user_data, user_state );\r
+\r
+ lfds601_freelist_delete( ss->fs, lfds601_stack_internal_freelist_delete_function, NULL );\r
+\r
+ lfds601_abstraction_aligned_free( ss );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_stack_clear( struct lfds601_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ void\r
+ *user_data;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data_clear_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ while( lfds601_stack_pop(ss, &user_data) )\r
+ if( user_data_clear_function != NULL )\r
+ user_data_clear_function( user_data, user_state );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds601_stack_internal_freelist_delete_function( void *user_data, void *user_state )\r
+{\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ lfds601_abstraction_aligned_free( user_data );\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "lfds601_internal.h"\r
+\r
+/***** pragmas *****/\r
+\r
+/***** defines *****/\r
+#define LFDS601_STACK_POINTER 0\r
+#define LFDS601_STACK_COUNTER 1\r
+#define LFDS601_STACK_PAC_SIZE 2\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS601_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds601_stack_state\r
+{\r
+ // TRD : must come first for alignment\r
+ struct lfds601_stack_element\r
+ *volatile top[LFDS601_STACK_PAC_SIZE];\r
+\r
+ lfds601_atom_t\r
+ aba_counter;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+};\r
+\r
+struct lfds601_stack_element\r
+{\r
+ struct lfds601_stack_element\r
+ *next[LFDS601_STACK_PAC_SIZE];\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ void\r
+ *user_data;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** private prototypes *****/\r
+int lfds601_stack_internal_freelist_init_function( void **user_data, void *user_state );\r
+void lfds601_stack_internal_freelist_delete_function( void *user_data, void *user_state );\r
+\r
+void lfds601_stack_internal_new_element_from_freelist( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE], void *user_data );\r
+void lfds601_stack_internal_new_element( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE], void *user_data );\r
+void lfds601_stack_internal_init_element( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE], struct lfds601_freelist_element *fe, void *user_data );\r
+\r
+void lfds601_stack_internal_push( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE] );\r
+\r
--- /dev/null
+#include "lfds601_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_stack_new( struct lfds601_stack_state **ss, lfds601_atom_t number_elements )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+\r
+ *ss = (struct lfds601_stack_state *) lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_stack_state), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *ss != NULL )\r
+ {\r
+ // TRD : the size of the lfds601_freelist is the size of the lfds601_stack\r
+ lfds601_freelist_new( &(*ss)->fs, number_elements, lfds601_stack_internal_freelist_init_function, NULL );\r
+\r
+ if( (*ss)->fs == NULL )\r
+ {\r
+ lfds601_abstraction_aligned_free( *ss );\r
+ *ss = NULL;\r
+ }\r
+\r
+ if( (*ss)->fs != NULL )\r
+ {\r
+ (*ss)->top[LFDS601_STACK_POINTER] = NULL;\r
+ (*ss)->top[LFDS601_STACK_COUNTER] = 0;\r
+ (*ss)->aba_counter = 0;\r
+ rv = 1;\r
+ }\r
+ }\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+int lfds601_stack_internal_freelist_init_function( void **user_data, void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ *user_data = lfds601_abstraction_aligned_malloc( sizeof(struct lfds601_stack_element), LFDS601_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *user_data != NULL )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_stack_internal_new_element_from_freelist( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds601_freelist_pop( ss->fs, &fe );\r
+\r
+ if( fe == NULL )\r
+ se[LFDS601_STACK_POINTER] = NULL;\r
+\r
+ if( fe != NULL )\r
+ lfds601_stack_internal_init_element( ss, se, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_stack_internal_new_element( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds601_freelist_guaranteed_pop( ss->fs, &fe );\r
+\r
+ if( fe == NULL )\r
+ se[LFDS601_STACK_POINTER] = NULL;\r
+\r
+ if( fe != NULL )\r
+ lfds601_stack_internal_init_element( ss, se, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_stack_internal_init_element( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE], struct lfds601_freelist_element *fe, void *user_data )\r
+{\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds601_freelist_get_user_data_from_element( fe, (void **) &se[LFDS601_STACK_POINTER] );\r
+\r
+ se[LFDS601_STACK_COUNTER] = (struct lfds601_stack_element *) lfds601_abstraction_increment( (lfds601_atom_t *) &ss->aba_counter );\r
+\r
+ se[LFDS601_STACK_POINTER]->next[LFDS601_STACK_POINTER] = NULL;\r
+ se[LFDS601_STACK_POINTER]->next[LFDS601_STACK_COUNTER] = 0;\r
+ se[LFDS601_STACK_POINTER]->fe = fe;\r
+ se[LFDS601_STACK_POINTER]->user_data = user_data;\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds601_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_stack_push( struct lfds601_stack_state *ss, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_stack_element\r
+ *se[LFDS601_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ lfds601_stack_internal_new_element_from_freelist( ss, se, user_data );\r
+\r
+ if( se[LFDS601_STACK_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds601_stack_internal_push( ss, se );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_stack_guaranteed_push( struct lfds601_stack_state *ss, void *user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_stack_element\r
+ *se[LFDS601_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ /* TRD : this function allocates a new lfds601_freelist element and uses that
+ to push onto the lfds601_stack, guaranteeing success (unless malloc()\r
+ fails of course)\r
+ */\r
+\r
+ lfds601_stack_internal_new_element( ss, se, user_data );\r
+\r
+ // TRD : malloc failed\r
+ if( se[LFDS601_STACK_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds601_stack_internal_push( ss, se );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds601_stack_internal_push( struct lfds601_stack_state *ss, struct lfds601_stack_element *se[LFDS601_STACK_PAC_SIZE] )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_stack_element\r
+ *original_se_next[LFDS601_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ original_se_next[LFDS601_STACK_POINTER] = ss->top[LFDS601_STACK_POINTER];\r
+ original_se_next[LFDS601_STACK_COUNTER] = ss->top[LFDS601_STACK_COUNTER];\r
+\r
+ do\r
+ {\r
+ se[LFDS601_STACK_POINTER]->next[LFDS601_STACK_POINTER] = original_se_next[LFDS601_STACK_POINTER];\r
+ se[LFDS601_STACK_POINTER]->next[LFDS601_STACK_COUNTER] = original_se_next[LFDS601_STACK_COUNTER];\r
+ }\r
+ while( 0 == lfds601_abstraction_dcas((volatile lfds601_atom_t *) ss->top, (lfds601_atom_t *) se, (lfds601_atom_t *) original_se_next) );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds601_stack_pop( struct lfds601_stack_state *ss, void **user_data )\r
+{\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) struct lfds601_stack_element\r
+ *se[LFDS601_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ assert( user_data != NULL );\r
+\r
+ se[LFDS601_STACK_COUNTER] = ss->top[LFDS601_STACK_COUNTER];\r
+ se[LFDS601_STACK_POINTER] = ss->top[LFDS601_STACK_POINTER];\r
+\r
+ do\r
+ {\r
+ if( se[LFDS601_STACK_POINTER] == NULL )\r
+ return( 0 );\r
+ }\r
+ while( 0 == lfds601_abstraction_dcas((volatile lfds601_atom_t *) ss->top, (lfds601_atom_t *) se[LFDS601_STACK_POINTER]->next, (lfds601_atom_t *) se) );\r
+\r
+ *user_data = se[LFDS601_STACK_POINTER]->user_data;\r
+\r
+ lfds601_freelist_push( ss->fs, se[LFDS601_STACK_POINTER]->fe );\r
+\r
+ return( 1 );\r
+}\r
+\r
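+/* TRD : illustrative usage sketch only - not part of the library build;
+        a minimal single-threaded example of the push and pop functions
+        above; the element count and values are arbitrary and
+        example_stack_usage is a hypothetical caller
+
+  #include "liblfds601.h"
+
+  int example_stack_usage( void )
+  {
+    struct lfds601_stack_state
+      *ss;
+
+    void
+      *user_data;
+
+    if( !lfds601_stack_new(&ss, 10) )
+      return( 0 );
+
+    // TRD : push returns 0 once the 10 freelist elements are in use;
+    //       guaranteed_push allocates a new element instead
+    lfds601_stack_push( ss, (void *) 1 );
+    lfds601_stack_guaranteed_push( ss, (void *) 2 );
+
+    // TRD : pop returns 0 once the stack is empty
+    while( lfds601_stack_pop(ss, &user_data) )
+      ;
+
+    lfds601_stack_delete( ss, NULL, NULL );
+
+    return( 1 );
+  }
+*/
+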
--- /dev/null
+#include "lfds601_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds601_stack_query( struct lfds601_stack_state *ss, enum lfds601_stack_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( ss != NULL );\r
+ // TRD : query_type can be any value in its range\r
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS601_STACK_QUERY_ELEMENT_COUNT:\r
+ assert( query_input == NULL );\r
+\r
+ lfds601_freelist_query( ss->fs, LFDS601_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+building test\r
+=============\r
+\r
+Windows (user-mode)\r
+===================\r
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition\r
+ to load "test.sln".\r
+\r
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously\r
+ you'll need to have run the appropriate vcvars*.bat first; you can build\r
+ for IA64, 64-bit and 32-bit - just run the correct vcvars batch file).
+\r
+ Targets are "rel", "dbg" and "clean". You need to clean between switching\r
+ targets.\r
+\r
+Windows (kernel)\r
+================\r
+No build supported, since this is a command line utility.\r
+\r
+Linux\r
+=====\r
+Use GNUmake to run "makefile.linux". Targets are "rel", "dbg" and\r
+"clean". You need to clean between switching targets.\r
+\r
+\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = ../liblfds601/inc\r
+LIBDIR = ../liblfds601/bin\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>nul 2>nul\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = test\r
+BINARY = $(BINDIR)/$(BINNAME)\r
+SRCDIRS = .\r
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+DEPENDS = $(patsubst %.c,$(OBJDIR)/%.d,$(notdir $(SOURCES)))
+SYSLIBS = -lpthread -lc -lm\r
+USRLIBS = -llfds601\r
+\r
+##### CPU variants #####\r
+GCCARCH = $(shell uname -m)\r
+\r
+ifeq ($(GCCARCH),x86_64)\r
+ GCCARCH = core2\r
+endif\r
+\r
+ifeq ($(findstring arm,$(GCCARCH)),arm)\r
+ GCCARCH = armv6k\r
+endif\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+DG = gcc\r
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)" \r
+\r
+CC = gcc\r
+CFBASE = -Wall -Wno-unknown-pragmas -std=c99 -march=$(GCCARCH) -c -I"$(SRCDIR)" -I"$(INCDIR)"\r
+CFREL = -O2 -Wno-strict-aliasing\r
+CFDBG = -O0 -g\r
+\r
+LD = gcc\r
+LFBASE = -L"$(LIBDIR)"\r
+LFREL = -O2 -s\r
+LFDBG = -O0 -g\r
+\r
+##### variants #####\r
+CFLAGS = $(CFBASE) $(CFDBG)\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+\r
+ifeq ($(MAKECMDGOALS),rel)\r
+ CFLAGS = $(CFBASE) $(CFREL)\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.o : %.c\r
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d\r
+ $(CC) $(CFLAGS) -o $@ $<\r
+\r
+##### explicit rules #####\r
+$(BINARY) : $(OBJECTS)\r
+ $(LD) -o $(BINARY) $(LFLAGS) $(OBJECTS) $(USRLIBS) $(SYSLIBS)\r
+ chmod +x $(BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean rel dbg\r
+\r
+clean : \r
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d\r
+\r
+rel : $(BINARY)\r
+dbg : $(BINARY)\r
+\r
+##### dependencies #####\r
+-include $(DEPENDS)\r
+\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = ../liblfds601/inc\r
+LIBDIR = ../liblfds601/bin\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>nul 2>nul\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = test\r
+BINARY = $(BINDIR)\$(BINNAME).exe\r
+SRCDIRS = .\r
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))\r
+SYSLIBS = kernel32.lib\r
+USRLIBS = liblfds601.lib\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+CC = cl\r
+CFBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCLUDE)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /D UNICODE /D _UNICODE /DWIN32_LEAN_AND_MEAN /D_CRT_SECURE_NO_WARNINGS\r
+CFREL = /Ox /DNDEBUG /MT\r
+CFDBG = /Od /Gm /Zi /D_DEBUG /MTd\r
+\r
+LD = link\r
+LFBASE = "/libpath:$(LIB)" "/libpath:$(LIBDIR)" /nologo /subsystem:console /nodefaultlib /nxcompat /wx\r
+LFREL = /incremental:no\r
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"\r
+\r
+##### variants #####\r
+CFLAGS = $(CFBASE) $(CFDBG)\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+CLIB = libcmtd.lib\r
+\r
+ifeq ($(MAKECMDGOALS),rel)\r
+ CFLAGS = $(CFBASE) $(CFREL)\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+ CLIB = libcmt.lib\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.obj : %.c\r
+ $(CC) $(CFLAGS) "/Fo$@" $<\r
+\r
+##### explicit rules #####\r
+$(BINARY) : $(OBJECTS)\r
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean rel dbg\r
+\r
+clean : \r
+ @erase /Q $(OBJDIR)\*.obj $(BINDIR)\$(BINNAME).* $(QUIETLY)\r
+\r
+rel : $(BINARY)\r
+dbg : $(BINARY)\r
+\r
--- /dev/null
+/***** defines *****/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ #include <windows.h>\r
+ typedef HANDLE thread_state_t;\r
+ typedef DWORD thread_return_t;\r
+ #define CALLING_CONVENTION WINAPI\r
+#endif\r
+\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ #include <wdm.h>\r
+ typedef HANDLE thread_state_t;\r
+ typedef VOID thread_return_t;\r
+ #define CALLING_CONVENTION \r
+#endif\r
+\r
+#if (defined __unix__ && __GNUC__)\r
+ /* TRD : any UNIX on any CPU with GCC\r
+\r
+ __unix__ indicates Solaris, Linux, HPUX, etc\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ #include <unistd.h>\r
+ #include <pthread.h>\r
+ typedef pthread_t thread_state_t;\r
+ typedef void * thread_return_t;\r
+ #define CALLING_CONVENTION \r
+#endif\r
+\r
+typedef thread_return_t (CALLING_CONVENTION *thread_function_t)( void *thread_user_state );\r
+\r
+/***** public prototypes *****/\r
+unsigned int abstraction_cpu_count( void );\r
+int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state );\r
+void abstraction_thread_wait( thread_state_t thread_state );\r
+\r
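+/* TRD : illustrative usage sketch only - not part of the test build;
+        shows how a thread function is declared with the portability
+        types above and started once per CPU, assuming a user-mode
+        target (where thread_return_t is a value type); example_thread
+        and example_thread_usage are hypothetical names
+
+  #include <stdlib.h>
+
+  static thread_return_t CALLING_CONVENTION example_thread( void *thread_user_state )
+  {
+    // TRD : thread_user_state is whatever was passed to abstraction_thread_start
+    return( (thread_return_t) 0 );
+  }
+
+  static void example_thread_usage( void )
+  {
+    unsigned int
+      cpu,
+      cpu_count;
+
+    thread_state_t
+      *thread_states;
+
+    cpu_count = abstraction_cpu_count();
+
+    thread_states = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+    if( thread_states == NULL )
+      return;
+
+    // TRD : start one thread per CPU, then wait for them all to finish
+    for( cpu = 0 ; cpu < cpu_count ; cpu++ )
+      abstraction_thread_start( &thread_states[cpu], cpu, example_thread, NULL );
+
+    for( cpu = 0 ; cpu < cpu_count ; cpu++ )
+      abstraction_thread_wait( thread_states[cpu] );
+
+    free( thread_states );
+
+    return;
+  }
+*/
+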
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ unsigned int abstraction_cpu_count()\r
+ {\r
+ SYSTEM_INFO\r
+ si;\r
+\r
+ GetNativeSystemInfo( &si );\r
+\r
+ return( (unsigned int) si.dwNumberOfProcessors );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ unsigned int abstraction_cpu_count()\r
+ {\r
+ unsigned int\r
+ active_processor_count;\r
+\r
+ active_processor_count = KeQueryActiveProcessorCount( NULL );\r
+\r
+ return( active_processor_count );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __linux__ && __GNUC__)\r
+\r
+ /* TRD : Linux on any CPU with GCC\r
+\r
+ I believe this function is Linux-specific; the equivalent varies by UNIX flavour\r
+\r
+ __linux__ indicates Linux\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ unsigned int abstraction_cpu_count()\r
+ {\r
+ long int\r
+ cpu_count;\r
+\r
+ cpu_count = sysconf( _SC_NPROCESSORS_ONLN );\r
+\r
+ return( (unsigned int) cpu_count );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )\r
+ {\r
+ int\r
+ rv = 0;\r
+\r
+ DWORD\r
+ thread_id;\r
+\r
+ DWORD_PTR\r
+ affinity_mask,\r
+ result;\r
+\r
+ assert( thread_state != NULL );\r
+ // TRD : cpu can be any value in its range\r
+ assert( thread_function != NULL );\r
+ // TRD : thread_user_state can be NULL\r
+\r
+ affinity_mask = (DWORD_PTR) (1 << cpu);\r
+\r
+ *thread_state = CreateThread( NULL, 0, thread_function, thread_user_state, NO_FLAGS, &thread_id );\r
+\r
+ result = SetThreadAffinityMask( *thread_state, affinity_mask );\r
+\r
+ if( *thread_state != NULL and result != 0 )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )\r
+ {\r
+ int\r
+ rv = 0;\r
+\r
+ KAFFINITY\r
+ affinity_mask;\r
+\r
+ NTSTATUS\r
+ nts_create,\r
+ nts_affinity;\r
+\r
+ assert( thread_state != NULL );\r
+ // TRD : cpu can be any value in its range\r
+ assert( thread_function != NULL );\r
+ // TRD : thread_user_state can be NULL\r
+\r
+ affinity_mask = 1 << cpu;\r
+\r
+ nts_create = PsCreateSystemThread( thread_state, THREAD_ALL_ACCESS, NULL, NULL, NULL, thread_function, thread_user_state );\r
+\r
+ nts_affinity = ZwSetInformationThread( *thread_state, ThreadAffinityMask, &affinity_mask, sizeof(KAFFINITY) );\r
+\r
+ if( nts_create == STATUS_SUCCESS and nts_affinity == STATUS_SUCCESS )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __unix__)\r
+\r
+ /* TRD : any UNIX on any CPU with any compiler\r
+\r
+ I assumed pthreads is available on any UNIX.\r
+\r
+ __unix__ indicates Solaris, Linux, HPUX, etc\r
+ */\r
+\r
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )\r
+ {\r
+ int\r
+ rv = 0,\r
+ rv_create;\r
+\r
+ assert( thread_state != NULL );\r
+ // TRD : cpu can be any value in its range\r
+ assert( thread_function != NULL );\r
+ // TRD : thread_user_state can be NULL\r
+\r
+ rv_create = pthread_create( thread_state, NULL, thread_function, thread_user_state );\r
+\r
+ if( rv_create == 0 )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ void abstraction_thread_wait( thread_state_t thread_state )\r
+ {\r
+ WaitForSingleObject( thread_state, INFINITE );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ void abstraction_thread_wait( thread_state_t thread_state )\r
+ {\r
+ // TRD : thread_state is a handle, so wait on the handle itself;\r
+ //       KeWaitForSingleObject expects a dispatcher object, not a handle\r
+ ZwWaitForSingleObject( thread_state, FALSE, NULL );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __unix__)\r
+\r
+ /* TRD : any UNIX on any CPU with any compiler\r
+\r
+ I assumed pthreads is available on any UNIX.\r
+\r
+ __unix__ indicates Solaris, Linux, HPUX, etc\r
+ */\r
+\r
+ void abstraction_thread_wait( thread_state_t thread_state )\r
+ {\r
+ pthread_join( thread_state, NULL );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds601_freelist( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds601_freelist_state
+ *fs;
+
+ struct lfds601_freelist_benchmark
+ *fb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds601_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the freelist
+
+ the benchmark is to have a single freelist
+ where a worker thread busy-works popping and then pushing
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ fb = (struct lfds601_freelist_benchmark *) malloc( sizeof(struct lfds601_freelist_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Freelist Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS601_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds601_freelist_new( &fs, 1000, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (fb+loop)->fs = fs;
+ (fb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds601_freelist_thread_pop_and_push, fb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (fb+loop)->operation_count;
+
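+    // TRD : the divisor of 10 is the benchmark duration in seconds (each worker thread runs for ten seconds)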
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (fb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
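+    // TRD : scalability is the N-thread total throughput relative to N times the single-thread throughput; 1.00 indicates linear scaling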
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds601_freelist_delete( fs, NULL, NULL );
+ }
+
+ free( fb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds601_freelist_thread_pop_and_push( void *lfds601_freelist_benchmark )
+{
+ struct lfds601_freelist_benchmark
+ *fb;
+
+ struct lfds601_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( lfds601_freelist_benchmark != NULL );
+
+ fb = (struct lfds601_freelist_benchmark *) lfds601_freelist_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds601_freelist_pop( fb->fs, &fe );
+ lfds601_freelist_push( fb->fs, fe );
+
+ fb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds601_queue( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds601_queue_state
+ *qs;
+
+ struct lfds601_queue_benchmark
+ *qb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds601_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the queue
+
+ the benchmark is to have a single queue
+ where a worker thread busy-works dequeuing and then queuing
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ qb = (struct lfds601_queue_benchmark *) malloc( sizeof(struct lfds601_queue_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Queue Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS601_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds601_queue_new( &qs, 1000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qb+loop)->qs = qs;
+ (qb+loop)->operation_count = 0;
+ }
+
+ // TRD : populate the queue (we don't actually use the user data)
+ for( loop = 0 ; loop < 500 ; loop++ )
+ lfds601_queue_enqueue( qs, (void *) (lfds601_atom_t) loop );
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds601_queue_thread_dequeue_and_enqueue, qb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (qb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (qb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds601_queue_delete( qs, NULL, NULL );
+ }
+
+ free( qb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds601_queue_thread_dequeue_and_enqueue( void *lfds601_queue_benchmark )
+{
+ struct lfds601_queue_benchmark
+ *qb;
+
+ void
+ *user_data;
+
+ time_t
+ start_time;
+
+ assert( lfds601_queue_benchmark != NULL );
+
+ qb = (struct lfds601_queue_benchmark *) lfds601_queue_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds601_queue_dequeue( qb->qs, &user_data );
+ lfds601_queue_enqueue( qb->qs, user_data );
+
+ qb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds601_ringbuffer( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds601_ringbuffer_state
+ *rs;
+
+ struct lfds601_ringbuffer_benchmark
+ *rb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds601_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the ringbuffer
+
+ the benchmark is to have a single ringbuffer
+ where a worker thread busy-works writing and then reading
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ rb = (struct lfds601_ringbuffer_benchmark *) malloc( sizeof(struct lfds601_ringbuffer_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Ringbuffer Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS601_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds601_ringbuffer_new( &rs, 1000, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rb+loop)->rs = rs;
+ (rb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds601_ringbuffer_thread_write_and_read, rb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (rb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (rb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds601_ringbuffer_delete( rs, NULL, NULL );
+ }
+
+ free( rb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds601_ringbuffer_thread_write_and_read( void *lfds601_ringbuffer_benchmark )
+{
+ struct lfds601_ringbuffer_benchmark
+ *rb;
+
+ struct lfds601_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( lfds601_ringbuffer_benchmark != NULL );
+
+ rb = (struct lfds601_ringbuffer_benchmark *) lfds601_ringbuffer_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds601_ringbuffer_get_write_element( rb->rs, &fe, NULL );
+ lfds601_ringbuffer_put_write_element( rb->rs, fe );
+
+ lfds601_ringbuffer_get_read_element( rb->rs, &fe );
+ lfds601_ringbuffer_put_read_element( rb->rs, fe );
+
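+    // TRD : one write (get/put of a write element) plus one read (get/put of a read element) is counted as two operations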
+ rb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds601_stack( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds601_stack_state
+ *ss;
+
+ struct lfds601_stack_benchmark
+ *sb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds601_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the stack
+
+ the benchmark is to have a single stack
+ where a worker thread busy-works pushing then popping
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ sb = (struct lfds601_stack_benchmark *) malloc( sizeof(struct lfds601_stack_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Stack Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS601_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds601_stack_new( &ss, 1000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (sb+loop)->ss = ss;
+ (sb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds601_stack_thread_push_and_pop, sb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (sb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (sb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds601_stack_delete( ss, NULL, NULL );
+ }
+
+ free( sb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds601_stack_thread_push_and_pop( void *lfds601_stack_benchmark )
+{
+ struct lfds601_stack_benchmark
+ *sb;
+
+ void
+ *user_data = NULL;
+
+ time_t
+ start_time;
+
+ assert( lfds601_stack_benchmark != NULL );
+
+ sb = (struct lfds601_stack_benchmark *) lfds601_stack_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds601_stack_push( sb->ss, user_data );
+ lfds601_stack_pop( sb->ss, &user_data );
+
+ sb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** ANSI includes *****/\r
+#include <assert.h>\r
+#include <math.h>\r
+#include <stdio.h>\r
+#include <stdarg.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <time.h>\r
+\r
+/***** internal includes *****/\r
+#include "abstraction.h"\r
+\r
+/***** external includes *****/\r
+#include "liblfds601.h"\r
+\r
+/***** defines *****/\r
+#define and &&\r
+#define or ||\r
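+\r
+/* TRD : these mirror the ISO C <iso646.h> alternative spellings of && and ||;\r
+        they are used throughout the test code, e.g. "if( argc == 1 or argc >= 4 )"\r
+*/\r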
+\r
+#define RAISED 1\r
+#define LOWERED 0\r
+\r
+#define NO_FLAGS 0x0\r
+\r
+/***** enums *****/\r
+enum lfds601_test_operation\r
+{\r
+ UNKNOWN,\r
+ HELP,\r
+ TEST,\r
+ BENCHMARK\r
+};\r
+\r
+/***** structs *****/\r
+#include "structures.h"\r
+\r
+/***** prototypes *****/\r
+int main( int argc, char **argv );\r
+\r
+void internal_display_test_name( char *test_name );\r
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... );\r
+void internal_display_data_structure_validity( enum lfds601_data_structure_validity dvs );\r
+\r
+void benchmark_lfds601_freelist( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds601_freelist_thread_pop_and_push( void *lfds601_freelist_benchmark );\r
+\r
+void benchmark_lfds601_queue( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds601_queue_thread_dequeue_and_enqueue( void *lfds601_queue_benchmark );\r
+\r
+void benchmark_lfds601_ringbuffer( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds601_ringbuffer_thread_write_and_read( void *lfds601_ringbuffer_benchmark );\r
+\r
+void benchmark_lfds601_stack( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds601_stack_thread_push_and_pop( void *lfds601_stack_benchmark );\r
+\r
+void test_lfds601_abstraction( void );\r
+ void abstraction_test_increment( void );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter );\r
+ void abstraction_test_dcas( void );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state );\r
+\r
+void test_lfds601_freelist( void );\r
+ void freelist_test_internal_popping( void );\r
+ int freelist_test_internal_popping_init( void **user_data, void *user_state );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state );\r
+ void freelist_test_internal_pushing( void );\r
+ int freelist_test_internal_pushing_init( void **user_data, void *user_state );\r
+ void freelist_test_internal_pushing_delete( void *user_data, void *user_state );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state );\r
+ void freelist_test_internal_popping_and_pushing( void );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state );\r
+ void freelist_test_internal_rapid_popping_and_pushing( void );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds601_freelist_state );\r
+\r
+void test_lfds601_queue( void );\r
+ void queue_test_enqueuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state );\r
+ void queue_test_dequeuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state );\r
+ void queue_test_enqueuing_and_dequeuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );\r
+ void queue_test_rapid_enqueuing_and_dequeuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );\r
+\r
+void test_lfds601_ringbuffer( void );\r
+ void ringbuffer_test_reading( void );\r
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state );\r
+ void ringbuffer_test_writing( void );\r
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state );\r
+ void ringbuffer_test_reading_and_writing( void );\r
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state );\r
+\r
+void test_lfds601_slist( void );\r
+ thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_head_writer( void *lfds601_slist_thread_start_state );\r
+ thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_after_writer( void *lfds601_slist_thread_start_state );\r
+ thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_traverser( void *lfds601_slist_thread_start_state );\r
+ thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_deleter_traverser( void *lfds601_slist_thread_start_state );\r
+\r
+void test_lfds601_stack( void );\r
+ thread_return_t CALLING_CONVENTION lfds601_stack_internal_thread_reader( void *lfds601_stack_state );\r
+ thread_return_t CALLING_CONVENTION lfds601_stack_internal_thread_writer( void *lfds601_stack_state );\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int main( int argc, char **argv )\r
+{\r
+ enum lfds601_test_operation\r
+ operation = UNKNOWN;\r
+\r
+ unsigned int\r
+ loop,\r
+ iterations = 1;\r
+\r
+ assert( argc >= 1 );\r
+ assert( argv != NULL );\r
+\r
+ if( argc == 1 or argc >= 4 )\r
+ operation = HELP;\r
+\r
+ if( operation == UNKNOWN )\r
+ {\r
+ if( 0 == strcmp(*(argv+1), "test") )\r
+ {\r
+ operation = TEST;\r
+\r
+ // TRD : sscanf() may fail, but iterations is initialised to 1, so it's okay\r
+ if( argc == 3 )\r
+ sscanf( *(argv+2), "%u", &iterations );\r
+ }\r
+\r
+ if( 0 == strcmp(*(argv+1), "benchmark") )\r
+ operation = BENCHMARK;\r
+ }\r
+\r
+ switch( operation )\r
+ {\r
+ case UNKNOWN:\r
+ case HELP:\r
+ printf( "test [test|benchmark] [iterations]\n"\r
+ " test : run the test suite\n"\r
+ " benchmark : run the benchmark suite\n"\r
+ " iterations : optional, only applies to tests, default is 1\n" );\r
+ break;\r
+\r
+ case TEST:\r
+ for( loop = 1 ; loop < iterations+1 ; loop++ )\r
+ {\r
+ printf( "\n"\r
+ "Test Iteration %02u\n"\r
+ "=================\n", loop );\r
+\r
+ test_lfds601_abstraction();\r
+ test_lfds601_freelist();\r
+ test_lfds601_queue();\r
+ test_lfds601_ringbuffer();\r
+ test_lfds601_slist();\r
+ test_lfds601_stack();\r
+ }\r
+ break;\r
+\r
+ case BENCHMARK:\r
+ benchmark_lfds601_freelist();\r
+ benchmark_lfds601_queue();\r
+ benchmark_lfds601_ringbuffer();\r
+ benchmark_lfds601_stack();\r
+ break;\r
+ }\r
+\r
+ return( EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void internal_display_test_name( char *test_name )\r
+{\r
+ assert( test_name != NULL );\r
+\r
+ printf( "%s...", test_name );\r
+ fflush( stdout );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... )\r
+{\r
+ va_list\r
+ va;\r
+\r
+ int\r
+ passed_flag = RAISED;\r
+\r
+ unsigned int\r
+ loop;\r
+\r
+ char\r
+ *name;\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs;\r
+\r
+ // TRD : number_name_dvs_pairs can be any value in its range\r
+\r
+ va_start( va, number_name_dvs_pairs );\r
+\r
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )\r
+ {\r
+ name = va_arg( va, char * );\r
+ dvs = va_arg( va, enum lfds601_data_structure_validity );\r
+\r
+ if( dvs != LFDS601_VALIDITY_VALID )\r
+ {\r
+ passed_flag = LOWERED;\r
+ break;\r
+ }\r
+ }\r
+\r
+ va_end( va );\r
+\r
+ if( passed_flag == RAISED )\r
+ puts( "passed" );\r
+\r
+ if( passed_flag == LOWERED )\r
+ {\r
+ printf( "failed (" );\r
+\r
+ va_start( va, number_name_dvs_pairs );\r
+\r
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )\r
+ {\r
+ name = va_arg( va, char * );\r
+ dvs = va_arg( va, enum lfds601_data_structure_validity );\r
+\r
+ printf( "%s ", name );\r
+ internal_display_data_structure_validity( dvs );\r
+\r
+ if( loop+1 < number_name_dvs_pairs )\r
+ printf( ", " );\r
+ }\r
+\r
+ va_end( va );\r
+\r
+ printf( ")\n" );\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void internal_display_data_structure_validity( enum lfds601_data_structure_validity dvs )\r
+{\r
+ char\r
+ *string = NULL;\r
+\r
+ switch( dvs )\r
+ {\r
+ case LFDS601_VALIDITY_VALID:\r
+ string = "valid";\r
+ break;\r
+\r
+ case LFDS601_VALIDITY_INVALID_LOOP:\r
+ string = "invalid - loop detected";\r
+ break;\r
+\r
+ case LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS:\r
+ string = "invalid - missing elements";\r
+ break;\r
+\r
+ case LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS:\r
+ string = "invalid - additional elements";\r
+ break;\r
+\r
+ case LFDS601_VALIDITY_INVALID_TEST_DATA:\r
+ string = "invalid - invalid test data";\r
+ break;\r
+ }\r
+\r
+ printf( "%s", string );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+/***** structs *****/\r
+#pragma pack( push, LFDS601_ALIGN_DOUBLE_POINTER )\r
+\r
+/***** abstraction tests *****/\r
+struct abstraction_test_dcas_state\r
+{\r
+ volatile lfds601_atom_t\r
+ *shared_counter;\r
+\r
+ lfds601_atom_t\r
+ local_counter;\r
+};\r
+\r
+/***** freelist tests *****/\r
+struct freelist_test_popping_state\r
+{\r
+ struct lfds601_freelist_state\r
+ *fs,\r
+ *fs_thread_local;\r
+};\r
+\r
+struct freelist_test_pushing_state\r
+{\r
+ lfds601_atom_t\r
+ thread_number;\r
+\r
+ struct lfds601_freelist_state\r
+ *source_fs,\r
+ *fs;\r
+};\r
+\r
+struct freelist_test_popping_and_pushing_state\r
+{\r
+ struct lfds601_freelist_state\r
+ *local_fs,\r
+ *fs;\r
+};\r
+\r
+struct freelist_test_counter_and_thread_number\r
+{\r
+ lfds601_atom_t\r
+ thread_number;\r
+\r
+ unsigned long long int\r
+ counter;\r
+};\r
+\r
+/***** queue tests *****/\r
+struct queue_test_enqueuing_state\r
+{\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ lfds601_atom_t\r
+ counter;\r
+};\r
+\r
+struct queue_test_dequeuing_state\r
+{\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ int\r
+ error_flag;\r
+};\r
+\r
+struct queue_test_enqueuing_and_dequeuing_state\r
+{\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ lfds601_atom_t\r
+ counter,\r
+ thread_number,\r
+ *per_thread_counters;\r
+\r
+ unsigned int\r
+ cpu_count;\r
+\r
+ int\r
+ error_flag;\r
+};\r
+\r
+struct queue_test_rapid_enqueuing_and_dequeuing_state\r
+{\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ lfds601_atom_t\r
+ counter;\r
+};\r
+\r
+/***** ringbuffer tests *****/\r
+struct ringbuffer_test_reading_state\r
+{\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ int\r
+ error_flag;\r
+\r
+ lfds601_atom_t\r
+ read_count;\r
+};\r
+\r
+struct ringbuffer_test_writing_state\r
+{\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ lfds601_atom_t\r
+ write_count;\r
+};\r
+\r
+struct ringbuffer_test_reading_and_writing_state\r
+{\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ lfds601_atom_t\r
+ counter,\r
+ *per_thread_counters;\r
+\r
+ unsigned int\r
+ cpu_count;\r
+\r
+ int\r
+ error_flag;\r
+};\r
+\r
+/***** slist tests *****/\r
+struct lfds601_slist_thread_start_state\r
+{\r
+ struct lfds601_slist_state\r
+ *ss;\r
+\r
+ struct lfds601_slist_element\r
+ *se;\r
+\r
+ time_t\r
+ duration;\r
+\r
+ unsigned long int\r
+ iteration_modulo;\r
+};\r
+\r
+/***** stack tests *****/\r
+\r
+/***** freelist benchmarks *****/\r
+struct lfds601_freelist_benchmark\r
+{\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+\r
+ lfds601_atom_t\r
+ operation_count;\r
+};\r
+\r
+/***** queue benchmarks *****/\r
+struct lfds601_queue_benchmark\r
+{\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ lfds601_atom_t\r
+ operation_count;\r
+};\r
+\r
+/***** ringbuffer benchmarks *****/\r
+struct lfds601_ringbuffer_benchmark\r
+{\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ lfds601_atom_t\r
+ operation_count;\r
+};\r
+\r
+/***** stack benchmarks *****/\r
+struct lfds601_stack_benchmark\r
+{\r
+ struct lfds601_stack_state\r
+ *ss;\r
+\r
+ lfds601_atom_t\r
+ operation_count;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds601_abstraction( void )\r
+{\r
+ printf( "\n"\r
+ "Abstraction Tests\n"\r
+ "=================\n" );\r
+\r
+ abstraction_test_increment();\r
+ abstraction_test_dcas();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void abstraction_test_increment( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ lfds601_atom_t\r
+ shared_counter = 0,\r
+ atomic_shared_counter = 0;\r
+\r
+ /* TRD : here we test lfds601_abstraction_increment\r
+\r
+ first, we run one thread per CPU where each thread increments\r
+ a shared counter 10,000,000 times - however, this first test\r
+ does NOT use atomic increment; it uses "++"\r
+\r
+ second, we repeat the exercise, but this time using\r
+ lfds601_abstraction_increment()\r
+\r
+ if the final value in the first test is less than (10,000,000*cpu_count)\r
+ then the system is sensitive to non-atomic increments; this means if\r
+ our atomic version of the test passes, we can have some degree of confidence\r
+ that it works\r
+\r
+ if the final value in the first test is in fact correct, then we can't know\r
+ that our atomic version has changed anything\r
+\r
+ and of course if the final value in the atomic test is wrong, we know things\r
+ are broken\r
+ */\r
+\r
+ internal_display_test_name( "Atomic increment" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ // TRD : non-atomic\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_increment, &shared_counter );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ // TRD : atomic\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_atomic_increment, &atomic_shared_counter );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : results\r
+ if( shared_counter < (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )\r
+ puts( "passed" );\r
+\r
+ if( shared_counter == (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )\r
+ puts( "indeterminate" );\r
+\r
+ if( atomic_shared_counter < (10000000 * cpu_count) )\r
+ puts( "failed" );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter )\r
+{\r
+ volatile lfds601_atom_t\r
+ count = 0;\r
+\r
+ /* TRD : lfds601_atom_t must be volatile or the compiler\r
+ optimizes it away into a single store\r
+ */\r
+\r
+ assert( shared_counter != NULL );\r
+\r
+ while( count++ < 10000000 )\r
+ (*(lfds601_atom_t *) shared_counter)++;\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter )\r
+{\r
+ lfds601_atom_t\r
+ count = 0;\r
+\r
+ assert( shared_counter != NULL );\r
+\r
+ while( count++ < 10000000 )\r
+ lfds601_abstraction_increment( shared_counter );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void abstraction_test_dcas( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct abstraction_test_dcas_state\r
+ *atds;\r
+\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) volatile lfds601_atom_t\r
+ shared_counter[2] = { 0, 0 };\r
+\r
+ lfds601_atom_t\r
+ local_total = 0;\r
+\r
+ /* TRD : here we test lfds601_abstraction_dcas\r
+\r
+ we run one thread per CPU\r
+ we use lfds601_abstraction_dcas() to increment a shared counter\r
+ every time a thread successfully increments the counter,\r
+ it increments a thread local counter\r
+ the threads run for ten seconds\r
+ after the threads finish, we total the local counters\r
+ they should equal the shared counter\r
+ */\r
+\r
+ internal_display_test_name( "Atomic DCAS" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ atds = malloc( sizeof(struct abstraction_test_dcas_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (atds+loop)->shared_counter = shared_counter;\r
+ (atds+loop)->local_counter = 0;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_dcas, atds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : results\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ local_total += (atds+loop)->local_counter;\r
+\r
+ if( local_total == shared_counter[0] )\r
+ puts( "passed" );\r
+\r
+ if( local_total != shared_counter[0] )\r
+ puts( "failed" );\r
+\r
+ // TRD : cleanup\r
+ free( atds );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state )\r
+{\r
+ struct abstraction_test_dcas_state\r
+ *atds;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ LFDS601_ALIGN(LFDS601_ALIGN_DOUBLE_POINTER) lfds601_atom_t\r
+ exchange[2],\r
+ compare[2];\r
+\r
+ assert( abstraction_test_dcas_state != NULL );\r
+\r
+ atds = (struct abstraction_test_dcas_state *) abstraction_test_dcas_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ compare[0] = *atds->shared_counter;\r
+ compare[1] = *(atds->shared_counter+1);\r
+\r
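+ /* TRD : the loop assumes (as its structure implies) that a failed DCAS writes\r
+          the current value of the shared counter back into compare, so each\r
+          retry recomputes exchange from a refreshed compare rather than\r
+          spinning forever on a stale value\r
+ */\r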
+ do\r
+ {\r
+ exchange[0] = compare[0] + 1;\r
+ exchange[1] = compare[1];\r
+ }\r
+ while( 0 == lfds601_abstraction_dcas(atds->shared_counter, exchange, compare) );\r
+\r
+ atds->local_counter++;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds601_freelist( void )\r
+{\r
+ printf( "\n"\r
+ "Freelist Tests\n"\r
+ "==============\n" );\r
+\r
+ freelist_test_internal_popping();\r
+ freelist_test_internal_pushing();\r
+ freelist_test_internal_popping_and_pushing();\r
+ freelist_test_internal_rapid_popping_and_pushing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_popping( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ // TRD : count receives the element user data, a pointer-sized value, so it must be lfds601_atom_t\r
+ lfds601_atom_t\r
+ count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs = LFDS601_VALIDITY_VALID;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ struct freelist_test_popping_state\r
+ *ftps;\r
+\r
+ unsigned int\r
+ *found_count;\r
+\r
+ /* TRD : we create a freelist with 1,000,000 elements\r
+\r
+ the creation function runs in a single thread and creates\r
+ and pushes those elements onto the freelist\r
+\r
+ each element contains a void pointer which is its element number\r
+\r
+ we then run one thread per CPU\r
+ where each thread loops, popping as quickly as possible\r
+ each popped element is pushed onto a thread-local freelist\r
+\r
+ the threads run till the source freelist is empty\r
+\r
+ we then check the thread-local freelists\r
+ we should find we have every element\r
+\r
+ then tidy up\r
+ */\r
+\r
+ internal_display_test_name( "Popping" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_freelist_new( &fs, 1000000, freelist_test_internal_popping_init, NULL );\r
+ ftps = malloc( sizeof(struct freelist_test_popping_state) * cpu_count );\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (ftps+loop)->fs = fs;\r
+ lfds601_freelist_new( &(ftps+loop)->fs_thread_local, 0, NULL, NULL );\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping, ftps+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now we check the thread-local freelists\r
+ found_count = malloc( sizeof(unsigned int) * 1000000 );\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ *(found_count+loop) = 0;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ while( lfds601_freelist_pop((ftps+loop)->fs_thread_local, &fe) )\r
+ {\r
+ lfds601_freelist_get_user_data_from_element( fe, (void **) &count );\r
+ (*(found_count+count))++;\r
+ lfds601_freelist_push( fs, fe );\r
+ }\r
+ }\r
+\r
+ for( loop = 0 ; loop < 1000000 and dvs == LFDS601_VALIDITY_VALID ; loop++ )\r
+ {\r
+ if( *(found_count+loop) == 0 )\r
+ dvs = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( *(found_count+loop) > 1 )\r
+ dvs = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ // TRD : cleanup\r
+ free( found_count );\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ lfds601_freelist_delete( (ftps+loop)->fs_thread_local, NULL, NULL );\r
+ lfds601_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+int freelist_test_internal_popping_init( void **user_data, void *user_state )\r
+{\r
+ static lfds601_atom_t\r
+ count = 0;\r
+\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ *(lfds601_atom_t *) user_data = count++;\r
+\r
+ return( 1 );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state )\r
+{\r
+ struct freelist_test_popping_state\r
+ *ftps;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ assert( freelist_test_popping_state != NULL );\r
+\r
+ ftps = (struct freelist_test_popping_state *) freelist_test_popping_state;\r
+\r
+ while( lfds601_freelist_pop(ftps->fs, &fe) )\r
+ lfds601_freelist_push( ftps->fs_thread_local, fe );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs;\r
+\r
+ struct freelist_test_pushing_state\r
+ *ftps;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs,\r
+ *cleanup_fs;\r
+\r
+ struct freelist_test_counter_and_thread_number\r
+ *cnt,\r
+ *counter_and_number_trackers;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 1000000, 1000000 };\r
+\r
+ /* TRD : we create an empty freelist, which we will push to\r
+\r
+ we then create one freelist per CPU, where this freelist\r
+ contains 1,000,000/cpu_count number of elements and\r
+ each element is an incrementing counter and unique ID\r
+ (from 0 to number of CPUs)\r
+\r
+ we then start one thread per CPU, where each thread is\r
+ given one of the populated freelists and pops from that\r
+ to push to the empty freelist\r
+\r
+ the reason for this is to achieve memory pre-allocation\r
+ which allows the pushing threads to run at maximum speed\r
+\r
+ the threads end when their freelists are empty\r
+\r
+ we then fully pop the now populated main freelist (onto\r
+ a second freelist, so we can cleanly free all memory),\r
+ checking that the counts increment on a per unique ID basis\r
+ and that the number of elements we pop equals 1,000,000\r
+ (since each element has an incrementing counter which is\r
+ unique on a per unique ID basis, we can know we didn't lose\r
+ any elements)\r
+ */\r
+\r
+ internal_display_test_name( "Pushing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ ftps = malloc( sizeof(struct freelist_test_pushing_state) * cpu_count );\r
+\r
+ lfds601_freelist_new( &fs, 0, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (ftps+loop)->thread_number = (lfds601_atom_t) loop;\r
+ lfds601_freelist_new( &(ftps+loop)->source_fs, 1000000 / cpu_count, freelist_test_internal_pushing_init, (void *) (lfds601_atom_t) loop );\r
+ (ftps+loop)->fs = fs;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_pushing, ftps+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now fully pop and verify the main freelist\r
+ lfds601_freelist_new( &cleanup_fs, 0, NULL, NULL );\r
+\r
+ counter_and_number_trackers = malloc( sizeof(struct freelist_test_counter_and_thread_number) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (counter_and_number_trackers+loop)->counter = (1000000 / cpu_count) * loop;\r
+ (counter_and_number_trackers+loop)->thread_number = (lfds601_atom_t) loop;\r
+ }\r
+\r
+ lfds601_freelist_query( fs, LFDS601_FREELIST_QUERY_VALIDATE, &vi, (void *) &dvs );\r
+\r
+ while( dvs == LFDS601_VALIDITY_VALID and lfds601_freelist_pop(fs, &fe) )\r
+ {\r
+ static int count = 0;\r
+\r
+ lfds601_freelist_get_user_data_from_element( fe, (void **) &cnt );\r
+\r
+ if( cnt->counter != (counter_and_number_trackers+cnt->thread_number)->counter++ )\r
+ dvs = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ lfds601_freelist_push( cleanup_fs, fe );\r
+\r
+ count++;\r
+ }\r
+\r
+ // TRD : clean up\r
+ free( counter_and_number_trackers );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ lfds601_freelist_delete( (ftps+loop)->source_fs, NULL, NULL );\r
+\r
+ free( ftps );\r
+\r
+ lfds601_freelist_delete( cleanup_fs, freelist_test_internal_pushing_delete, NULL );\r
+ lfds601_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int freelist_test_internal_pushing_init( void **user_data, void *user_state )\r
+{\r
+ struct freelist_test_counter_and_thread_number\r
+ *ftcatn;\r
+\r
+ static lfds601_atom_t\r
+ counter = 0;\r
+\r
+ assert( user_data != NULL );\r
+ // TRD : user_state is being used as an integer type\r
+\r
+ *user_data = malloc( sizeof(struct freelist_test_counter_and_thread_number) );\r
+\r
+ ftcatn = (struct freelist_test_counter_and_thread_number *) *user_data;\r
+\r
+ ftcatn->counter = counter++;\r
+ ftcatn->thread_number = (lfds601_atom_t) user_state;\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void freelist_test_internal_pushing_delete( void *user_data, void *user_state )\r
+{\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ free( user_data );\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state )\r
+{\r
+ struct freelist_test_pushing_state\r
+ *ftps;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ assert( freelist_test_pushing_state != NULL );\r
+\r
+ ftps = (struct freelist_test_pushing_state *) freelist_test_pushing_state;\r
+\r
+ while( lfds601_freelist_pop(ftps->source_fs, &fe) )\r
+ lfds601_freelist_push( ftps->fs, fe );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_popping_and_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+\r
+ struct freelist_test_popping_and_pushing_state\r
+ *pps;\r
+\r
+ struct lfds601_validation_info\r
+ vi;\r
+\r
+ /* TRD : we have two threads per CPU\r
+ the threads loop for ten seconds\r
+ the first thread pushes 100000 elements then pops 100000 elements\r
+ the second thread pops 100000 elements then pushes 100000 elements\r
+ all pushes and pops go onto the single main freelist\r
+\r
+ after time is up, all threads push what they have remaining onto\r
+ the main freelist\r
+\r
+ we then validate the main freelist\r
+ */\r
+\r
+ internal_display_test_name( "Popping and pushing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_freelist_new( &fs, 100000 * cpu_count, NULL, NULL );\r
+\r
+ pps = malloc( sizeof(struct freelist_test_popping_and_pushing_state) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (pps+loop)->fs = fs;\r
+ lfds601_freelist_new( &(pps+loop)->local_fs, 0, NULL, NULL );\r
+\r
+ (pps+loop+cpu_count)->fs = fs;\r
+ lfds601_freelist_new( &(pps+loop+cpu_count)->local_fs, 100000, NULL, NULL );\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping_and_pushing_start_popping, pps+loop );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, freelist_test_internal_thread_popping_and_pushing_start_pushing, pps+loop+cpu_count );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ lfds601_freelist_delete( (pps+loop)->local_fs, NULL, NULL );\r
+\r
+ free( pps );\r
+\r
+ vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;\r
+\r
+ lfds601_freelist_query( fs, LFDS601_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );\r
+\r
+ lfds601_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state )\r
+{\r
+ struct freelist_test_popping_and_pushing_state\r
+ *pps;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned int\r
+ count;\r
+\r
+ assert( freelist_test_popping_and_pushing_state != NULL );\r
+\r
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ count = 0;\r
+\r
+ while( count < 100000 )\r
+ {\r
+ lfds601_freelist_pop( pps->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ {\r
+ lfds601_freelist_push( pps->local_fs, fe );\r
+ count++;\r
+ }\r
+ }\r
+\r
+ while( lfds601_freelist_pop(pps->local_fs, &fe) )\r
+ lfds601_freelist_push( pps->fs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state )\r
+{\r
+ struct freelist_test_popping_and_pushing_state\r
+ *pps;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned int\r
+ count;\r
+\r
+ assert( freelist_test_popping_and_pushing_state != NULL );\r
+\r
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ while( lfds601_freelist_pop(pps->local_fs, &fe) )\r
+ lfds601_freelist_push( pps->fs, fe );\r
+\r
+ count = 0;\r
+\r
+ while( count < 1000 )\r
+ {\r
+ lfds601_freelist_pop( pps->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ {\r
+ lfds601_freelist_push( pps->local_fs, fe );\r
+ count++;\r
+ }\r
+ }\r
+ }\r
+\r
+ // TRD : now push whatever we have in our local freelist\r
+ while( lfds601_freelist_pop(pps->local_fs, &fe) )\r
+ lfds601_freelist_push( pps->fs, fe );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_rapid_popping_and_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+\r
+ struct lfds601_validation_info\r
+ vi;\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs;\r
+\r
+ /* TRD : in these tests there is a fundamental antagonism between\r
+ how much checking/memory clean up that we do and the\r
+ likelihood of collisions between threads in their lock-free\r
+ operations\r
+\r
+ the lock-free operations are very quick; if we do anything\r
+ much at all between operations, we greatly reduce the chance\r
+ of threads colliding\r
+\r
+ so we have some tests which do enough checking/clean up that\r
+ they can tell the freelist is valid and don't leak memory\r
+ and this test is one of those which does minimal\r
+ checking - in fact, the nature of the test is that you can't\r
+ do any real checking - but it goes very quickly\r
+\r
+ what we do is create a small freelist and then run one thread\r
+ per CPU, where each thread simply pops and then immediately\r
+ pushes\r
+\r
+ the test runs for ten seconds\r
+\r
+ after the test is done, the only check we do is to traverse\r
+ the freelist, checking for loops and ensuring the number of\r
+ elements is correct\r
+ */\r
+\r
+ internal_display_test_name( "Rapid popping and pushing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_freelist_new( &fs, cpu_count, NULL, NULL );\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_rapid_popping_and_pushing, fs );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ vi.min_elements = cpu_count;\r
+ vi.max_elements = cpu_count;\r
+\r
+ lfds601_freelist_query( fs, LFDS601_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );\r
+\r
+ lfds601_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds601_freelist_state )\r
+{\r
+ struct lfds601_freelist_state\r
+ *fs;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( lfds601_freelist_state != NULL );\r
+\r
+ fs = (struct lfds601_freelist_state *) lfds601_freelist_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
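+ // the freelist holds one element per thread, so with each thread holding at most one element between its pop and push, this pop should always succeed\r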
+ lfds601_freelist_pop( fs, &fe );\r
+ lfds601_freelist_push( fs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds601_queue( void )\r
+{\r
+ printf( "\n"\r
+ "Queue Tests\n"\r
+ "===========\n" );\r
+\r
+ queue_test_enqueuing();\r
+ queue_test_dequeuing();\r
+ queue_test_enqueuing_and_dequeuing();\r
+ queue_test_rapid_enqueuing_and_dequeuing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_enqueuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_enqueuing_state\r
+ *qtes;\r
+\r
+ lfds601_atom_t\r
+ user_data,\r
+ thread,\r
+ count,\r
+ *per_thread_counters;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 1000000, 1000000 };\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[2];\r
+\r
+ /* TRD : create an empty queue with 1,000,000 elements in its freelist\r
+ then run one thread per CPU\r
+ where each thread busy-works, enqueuing elements (until there are no more elements)\r
+ each element's void pointer of user data is (thread number | element number)\r
+ where element_number is a thread-local counter starting at 0\r
+ where the thread_number occupies the top byte\r
+\r
+ when we're done, we check that all the elements are present\r
+ and increment on a per-thread basis\r
+ */\r
+\r
+ internal_display_test_name( "Enqueuing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_queue_new( &qs, 1000000 );\r
+\r
+ qtes = malloc( sizeof(struct queue_test_enqueuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
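+ // each thread's counter carries its thread number in the top byte; the remaining bits form the per-thread element number\r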
+ (qtes+loop)->qs = qs;\r
+ (qtes+loop)->counter = (lfds601_atom_t) loop << (sizeof(lfds601_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_enqueuer, qtes+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ free( qtes );\r
+\r
+ /* TRD : first, validate the queue\r
+\r
+ then dequeue\r
+ we expect to find element numbers incrementing on a per-thread basis\r
+ */\r
+\r
+ lfds601_queue_query( qs, LFDS601_QUEUE_QUERY_VALIDATE, &vi, dvs );\r
+\r
+ per_thread_counters = malloc( sizeof(lfds601_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 0;\r
+\r
+ while( dvs[0] == LFDS601_VALIDITY_VALID and dvs[1] == LFDS601_VALIDITY_VALID and lfds601_queue_dequeue(qs, (void *) &user_data) )\r
+ {\r
+ thread = user_data >> (sizeof(lfds601_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+ break;\r
+ }\r
+\r
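+ // a count below the expected value indicates a duplicated element, a count above indicates a missing element; a match advances the expected count\r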
+ if( count < per_thread_counters[thread] )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count > per_thread_counters[thread] )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( count == per_thread_counters[thread] )\r
+ per_thread_counters[thread]++;\r
+ }\r
+\r
+ free( per_thread_counters );\r
+\r
+ lfds601_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state )\r
+{\r
+ struct queue_test_enqueuing_state\r
+ *qtes;\r
+\r
+ assert( queue_test_enqueuing_state != NULL );\r
+\r
+ qtes = (struct queue_test_enqueuing_state *) queue_test_enqueuing_state;\r
+\r
+ // TRD : top byte of counter is already our thread number\r
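+ // enqueue until the queue's freelist is exhausted and enqueue fails\r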
+ while( lfds601_queue_enqueue(qtes->qs, (void *) qtes->counter++) );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_dequeuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_dequeuing_state\r
+ *qtds;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[2];\r
+\r
+ /* TRD : create a queue with 1,000,000 elements\r
+\r
+ use a single thread to enqueue every element\r
+ each element's user data is an incrementing counter\r
+\r
+ then run one thread per CPU\r
+ where each thread busy-works, dequeuing\r
+\r
+ when an element is dequeued, we check (on a per-thread basis) that the\r
+ value dequeued is greater than the value previously dequeued\r
+ */\r
+\r
+ internal_display_test_name( "Dequeuing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_queue_new( &qs, 1000000 );\r
+\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ lfds601_queue_enqueue( qs, (void *) (lfds601_atom_t) loop );\r
+\r
+ qtds = malloc( sizeof(struct queue_test_dequeuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qtds+loop)->qs = qs;\r
+ (qtds+loop)->error_flag = LOWERED;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_dequeuer, qtds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : check queue is empty\r
+ lfds601_queue_query( qs, LFDS601_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ // TRD : check for raised error flags\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (qtds+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ free( qtds );\r
+\r
+ lfds601_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state )\r
+{\r
+ struct queue_test_dequeuing_state\r
+ *qtds;\r
+\r
+ lfds601_atom_t\r
+ *prev_user_data,\r
+ *user_data;\r
+\r
+ assert( queue_test_dequeuing_state != NULL );\r
+\r
+ qtds = (struct queue_test_dequeuing_state *) queue_test_dequeuing_state;\r
+\r
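+ // prime prev_user_data with the first dequeued value so the ordering check below has a baseline\r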
+ lfds601_queue_dequeue( qtds->qs, (void *) &prev_user_data );\r
+\r
+ while( lfds601_queue_dequeue(qtds->qs, (void *) &user_data) )\r
+ {\r
+ if( user_data <= prev_user_data )\r
+ qtds->error_flag = RAISED;\r
+\r
+ prev_user_data = user_data;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_enqueuing_and_dequeuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ subloop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_enqueuing_and_dequeuing_state\r
+ *qteds;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[2];\r
+\r
+ internal_display_test_name( "Enqueuing and dequeuing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_queue_new( &qs, cpu_count );\r
+\r
+ qteds = malloc( sizeof(struct queue_test_enqueuing_and_dequeuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qteds+loop)->qs = qs;\r
+ (qteds+loop)->thread_number = loop;\r
+ (qteds+loop)->counter = (lfds601_atom_t) loop << (sizeof(lfds601_atom_t)*8-8);\r
+ (qteds+loop)->cpu_count = cpu_count;\r
+ (qteds+loop)->error_flag = LOWERED;\r
+ (qteds+loop)->per_thread_counters = malloc( sizeof(lfds601_atom_t) * cpu_count );\r
+\r
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )\r
+ *((qteds+loop)->per_thread_counters+subloop) = 0;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_enqueuer_and_dequeuer, qteds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds601_queue_query( qs, LFDS601_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (qteds+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ free( (qteds+loop)->per_thread_counters );\r
+\r
+ free( qteds );\r
+\r
+ lfds601_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_enqueuing_and_dequeuing_state )\r
+{\r
+ struct queue_test_enqueuing_and_dequeuing_state\r
+ *qteds;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ lfds601_atom_t\r
+ thread,\r
+ count,\r
+ user_data;\r
+\r
+ assert( queue_test_enqueuing_and_dequeuing_state != NULL );\r
+\r
+ qteds = (struct queue_test_enqueuing_and_dequeuing_state *) queue_test_enqueuing_and_dequeuing_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds601_queue_enqueue( qteds->qs, (void *) (qteds->counter++) );\r
+ lfds601_queue_dequeue( qteds->qs, (void *) &user_data );\r
+\r
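+ // the top byte identifies the enqueuing thread; the remaining bits are that thread's counter value\r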
+ thread = user_data >> (sizeof(lfds601_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= qteds->cpu_count )\r
+ qteds->error_flag = RAISED;\r
+ else\r
+ {\r
+ if( count < qteds->per_thread_counters[thread] )\r
+ qteds->error_flag = RAISED;\r
+\r
+ if( count >= qteds->per_thread_counters[thread] )\r
+ qteds->per_thread_counters[thread] = count+1;\r
+ }\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_rapid_enqueuing_and_dequeuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_rapid_enqueuing_and_dequeuing_state\r
+ *qtreds;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 50000, 50000 };\r
+\r
+ lfds601_atom_t\r
+ user_data,\r
+ thread,\r
+ count,\r
+ *per_thread_counters;\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[2];\r
+\r
+ internal_display_test_name( "Rapid enqueuing and dequeuing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_queue_new( &qs, 100000 );\r
+\r
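+ // pre-load 50,000 elements (half the queue) so dequeuing has work from the start\r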
+ for( loop = 0 ; loop < 50000 ; loop++ )\r
+ lfds601_queue_enqueue( qs, NULL );\r
+\r
+ qtreds = malloc( sizeof(struct queue_test_rapid_enqueuing_and_dequeuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qtreds+loop)->qs = qs;\r
+ (qtreds+loop)->counter = (lfds601_atom_t) loop << (sizeof(lfds601_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_rapid_enqueuer_and_dequeuer, qtreds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds601_queue_query( qs, LFDS601_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ // TRD : now check results\r
+ per_thread_counters = malloc( sizeof(lfds601_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 0;\r
+\r
+ while( dvs[0] == LFDS601_VALIDITY_VALID and dvs[1] == LFDS601_VALIDITY_VALID and lfds601_queue_dequeue(qs, (void *) &user_data) )\r
+ {\r
+ thread = user_data >> (sizeof(lfds601_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+ break;\r
+ }\r
+\r
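+ // the first count seen from a thread establishes its baseline, since that thread's earlier elements were dequeued during the test\r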
+ if( per_thread_counters[thread] == 0 )\r
+ per_thread_counters[thread] = count;\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count >= per_thread_counters[thread] )\r
+ per_thread_counters[thread] = count+1;\r
+ }\r
+\r
+ free( per_thread_counters );\r
+\r
+ free( qtreds );\r
+\r
+ lfds601_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state )\r
+{\r
+ struct queue_test_rapid_enqueuing_and_dequeuing_state\r
+ *qtreds;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ lfds601_atom_t\r
+ user_data;\r
+\r
+ assert( queue_test_rapid_enqueuing_and_dequeuing_state != NULL );\r
+\r
+ qtreds = (struct queue_test_rapid_enqueuing_and_dequeuing_state *) queue_test_rapid_enqueuing_and_dequeuing_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds601_queue_enqueue( qtreds->qs, (void *) (qtreds->counter++) );\r
+ lfds601_queue_dequeue( qtreds->qs, (void *) &user_data );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds601_ringbuffer( void )\r
+{\r
+ printf( "\n"\r
+ "Ringbuffer Tests\n"\r
+ "================\n" );\r
+\r
+ ringbuffer_test_reading();\r
+ ringbuffer_test_writing();\r
+ ringbuffer_test_reading_and_writing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void ringbuffer_test_reading( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ struct ringbuffer_test_reading_state\r
+ *rtrs;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[3];\r
+\r
+ lfds601_atom_t\r
+ total_read = 0;\r
+\r
+ /* TRD : we create a single ringbuffer\r
+ with 1,000,000 elements\r
+ we populate the ringbuffer, where the\r
+ user data is an incrementing counter\r
+\r
+ we create one thread per CPU\r
+ where each thread busy-works,\r
+ reading until the ringbuffer is empty\r
+\r
+ each thread keeps track of the number of reads it manages\r
+ and checks that each user data it reads is greater than the\r
+ previous user data that was read\r
+ */\r
+\r
+ internal_display_test_name( "Reading" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_ringbuffer_new( &rs, 1000000, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ {\r
+ lfds601_ringbuffer_get_write_element( rs, &fe, NULL );\r
+ lfds601_freelist_set_user_data_in_element( fe, (void *) (lfds601_atom_t) loop );\r
+ lfds601_ringbuffer_put_write_element( rs, fe );\r
+ }\r
+\r
+ rtrs = malloc( sizeof(struct ringbuffer_test_reading_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rtrs+loop)->rs = rs;\r
+ (rtrs+loop)->read_count = 0;\r
+ (rtrs+loop)->error_flag = LOWERED;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_reader, rtrs+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds601_ringbuffer_query( rs, LFDS601_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ // TRD : check for raised error flags\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (rtrs+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ // TRD : check the threads' read counts total 1,000,000\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ total_read += (rtrs+loop)->read_count;\r
+\r
+ if( total_read < 1000000 )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( total_read > 1000000 )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ free( rtrs );\r
+\r
+ lfds601_ringbuffer_delete( rs, NULL, NULL );\r
+\r
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state )\r
+{\r
+ struct ringbuffer_test_reading_state\r
+ *rtrs;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ lfds601_atom_t\r
+ *prev_user_data,\r
+ *user_data;\r
+\r
+ assert( ringbuffer_test_reading_state != NULL );\r
+\r
+ rtrs = (struct ringbuffer_test_reading_state *) ringbuffer_test_reading_state;\r
+\r
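+ // prime prev_user_data with the first element read so the ordering check has a baseline\r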
+ lfds601_ringbuffer_get_read_element( rtrs->rs, &fe );\r
+ lfds601_freelist_get_user_data_from_element( fe, (void **) &prev_user_data );\r
+ lfds601_ringbuffer_put_read_element( rtrs->rs, fe );\r
+\r
+ rtrs->read_count++;\r
+\r
+ while( lfds601_ringbuffer_get_read_element(rtrs->rs, &fe) )\r
+ {\r
+ lfds601_freelist_get_user_data_from_element( fe, (void **) &user_data );\r
+ lfds601_ringbuffer_put_read_element( rtrs->rs, fe );\r
+\r
+ if( user_data <= prev_user_data )\r
+ rtrs->error_flag = RAISED;\r
+\r
+ prev_user_data = user_data;\r
+\r
+ rtrs->read_count++;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void ringbuffer_test_writing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ struct ringbuffer_test_writing_state\r
+ *rtws;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 100000, 100000 };\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[3];\r
+\r
+ lfds601_atom_t\r
+ thread,\r
+ count,\r
+ user_data,\r
+ *per_thread_counters;\r
+\r
+ /* TRD : we create a single ringbuffer\r
+ with 100000 elements\r
+ the ringbuffer starts empty\r
+\r
+ we create one thread per CPU\r
+ where each thread busy-works writing\r
+ for ten seconds\r
+\r
+ the user data in each written element is a combination\r
+ of the thread number and the counter\r
+\r
+ after the threads are complete, we validate by\r
+ checking the user data counters increment on a per-thread\r
+ basis\r
+ */\r
+\r
+ internal_display_test_name( "Writing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_ringbuffer_new( &rs, 100000, NULL, NULL );\r
+\r
+ rtws = malloc( sizeof(struct ringbuffer_test_writing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rtws+loop)->rs = rs;\r
+ (rtws+loop)->write_count = (lfds601_atom_t) loop << (sizeof(lfds601_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_writer, rtws+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now check results\r
+ per_thread_counters = malloc( sizeof(lfds601_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 0;\r
+\r
+ lfds601_ringbuffer_query( rs, LFDS601_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ while( dvs[0] == LFDS601_VALIDITY_VALID and dvs[1] == LFDS601_VALIDITY_VALID and dvs[2] == LFDS601_VALIDITY_VALID and lfds601_ringbuffer_get_read_element(rs, &fe) )\r
+ {\r
+ lfds601_freelist_get_user_data_from_element( fe, (void *) &user_data );\r
+\r
+ thread = user_data >> (sizeof(lfds601_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+ lfds601_ringbuffer_put_read_element( rs, fe );\r
+ break;\r
+ }\r
+\r
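+ // the first count seen from a thread sets its baseline, as that thread's earlier writes may have been overwritten\r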
+ if( per_thread_counters[thread] == 0 )\r
+ per_thread_counters[thread] = count;\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count >= per_thread_counters[thread] )\r
+ per_thread_counters[thread] = count+1;\r
+\r
+ lfds601_ringbuffer_put_read_element( rs, fe );\r
+ }\r
+\r
+ free( per_thread_counters );\r
+\r
+ free( rtws );\r
+\r
+ lfds601_ringbuffer_delete( rs, NULL, NULL );\r
+\r
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state )\r
+{\r
+ struct ringbuffer_test_writing_state\r
+ *rtws;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( ringbuffer_test_writing_state != NULL );\r
+\r
+ rtws = (struct ringbuffer_test_writing_state *) ringbuffer_test_writing_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds601_ringbuffer_get_write_element( rtws->rs, &fe, NULL );\r
+ lfds601_freelist_set_user_data_in_element( fe, (void *) (lfds601_atom_t) (rtws->write_count++) );\r
+ lfds601_ringbuffer_put_write_element( rtws->rs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void ringbuffer_test_reading_and_writing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ subloop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_ringbuffer_state\r
+ *rs;\r
+\r
+ struct ringbuffer_test_reading_and_writing_state\r
+ *rtrws;\r
+\r
+ struct lfds601_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds601_data_structure_validity\r
+ dvs[3];\r
+\r
+ /* TRD : we create a single ringbuffer\r
+ with 100000 elements\r
+ the ringbuffer starts empty\r
+\r
+ we create one thread per CPU\r
+ where each thread busy-works writing\r
+ and then immediately reading\r
+ for ten seconds\r
+\r
+ the user data in each written element is a combination\r
+ of the thread number and the counter\r
+\r
+ while a thread runs, it keeps track of the\r
+ counters for the other threads and throws an error\r
+ if it sees the number stay the same or decrease\r
+ */\r
+\r
+ internal_display_test_name( "Reading and writing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds601_ringbuffer_new( &rs, 100000, NULL, NULL );\r
+\r
+ rtrws = malloc( sizeof(struct ringbuffer_test_reading_and_writing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rtrws+loop)->rs = rs;\r
+ (rtrws+loop)->counter = (lfds601_atom_t) loop << (sizeof(lfds601_atom_t)*8-8);\r
+ (rtrws+loop)->cpu_count = cpu_count;\r
+ (rtrws+loop)->error_flag = LOWERED;\r
+ (rtrws+loop)->per_thread_counters = malloc( sizeof(lfds601_atom_t) * cpu_count );\r
+\r
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )\r
+ *((rtrws+loop)->per_thread_counters+subloop) = 0;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_reader_writer, rtrws+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds601_ringbuffer_query( rs, LFDS601_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (rtrws+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS601_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ free( (rtrws+loop)->per_thread_counters );\r
+\r
+ free( rtrws );\r
+\r
+ lfds601_ringbuffer_delete( rs, NULL, NULL );\r
+\r
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state )\r
+{\r
+ struct ringbuffer_test_reading_and_writing_state\r
+ *rtrws;\r
+\r
+ struct lfds601_freelist_element\r
+ *fe;\r
+\r
+ lfds601_atom_t\r
+ user_data,\r
+ thread,\r
+ count;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( ringbuffer_test_reading_and_writing_state != NULL );\r
+\r
+ rtrws = (struct ringbuffer_test_reading_and_writing_state *) ringbuffer_test_reading_and_writing_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds601_ringbuffer_get_write_element( rtrws->rs, &fe, NULL );\r
+ lfds601_freelist_set_user_data_in_element( fe, (void *) (lfds601_atom_t) (rtrws->counter++) );\r
+ lfds601_ringbuffer_put_write_element( rtrws->rs, fe );\r
+\r
+ lfds601_ringbuffer_get_read_element( rtrws->rs, &fe );\r
+ lfds601_freelist_get_user_data_from_element( fe, (void *) &user_data );\r
+\r
+ thread = user_data >> (sizeof(lfds601_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= rtrws->cpu_count )\r
+ rtrws->error_flag = RAISED;\r
+ else\r
+ {\r
+ if( count < rtrws->per_thread_counters[thread] )\r
+ rtrws->error_flag = RAISED;\r
+\r
+ if( count >= rtrws->per_thread_counters[thread] )\r
+ rtrws->per_thread_counters[thread] = count+1;\r
+ }\r
+\r
+ lfds601_ringbuffer_put_read_element( rtrws->rs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds601_slist( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds601_slist_thread_start_state\r
+ stss;\r
+\r
+ /* TRD : 1. one head writer per CPU\r
+ 2. make one element, then one after writer per CPU\r
+ 3. make a list, then one list traverser per CPU\r
+ 4. one head writer and one list traverser per CPU\r
+ 5. make one element, then one after writer and one list traverser per CPU\r
+ 6. make a list, then one 100% deleter-traverser per CPU\r
+ 7. make a list, then one 25% deleter-traverser per CPU\r
+ 8. one head writer and one 100% deleter-traverse per CPU\r
+ 9. one head writer and one 25% deleter-traverse per CPU\r
+ 10. make one element, then one after writer and one 100% deleter-traverser per CPU\r
+ 11. make one element, then one after writer and one 25% deleter-traverser per CPU\r
+ 12. one head writer, one after writer, one traverser and one 25% deleter-traverser per CPU\r
+ */\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ printf( "\n"\r
+ "SList Test\n"\r
+ "==========\n" );\r
+\r
+ // TRD : 1. one head writer per CPU\r
+\r
+ printf( "\n"\r
+ "1. one head writer per CPU\n"\r
+ "==========================\n" );\r
+\r
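+ // duration is the number of seconds each worker thread runs; iteration_modulo sets how often a deleter-traverser deletes (1 = every visit, 4 = roughly 25%)\r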
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = NULL;\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_head_writer, &stss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 2. make one element, then one after writer per CPU\r
+\r
+ printf( "\n"\r
+ "2. make one element, then one after writer per CPU\n"\r
+ "==================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = lfds601_slist_new_head( stss.ss, (void *) NULL );\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_after_writer, &stss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 3. make a list, then one list traverser per CPU\r
+\r
+ printf( "\n"\r
+ "3. make a list, then one list traverser per CPU\n"\r
+ "===============================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = NULL;\r
+ stss.duration = 10;\r
+\r
+ // TRD : small list so we get collisions\r
+ for( loop = 0 ; loop < 10 ; loop++ )\r
+ lfds601_slist_new_head( stss.ss, (void *) 0 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_traverser, &stss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 4. one head writer and one list traverser per CPU\r
+\r
+ printf( "\n"\r
+ "4. one head writer and one list traverser per CPU\n"\r
+ "=================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = NULL;\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_head_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 5. make one element, then one after writer and one list traverser per CPU\r
+\r
+ printf( "\n"\r
+ "5. make one element, then one after writer and one list traverser per CPU\n"\r
+ "=========================================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = lfds601_slist_new_head( stss.ss, (void *) NULL );\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_after_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 6. make a list, then one 100% deleter-traverser per CPU\r
+\r
+ printf( "\n"\r
+ "6. make a list, then one 100%% deleter-traverser per CPU\n"\r
+ "=======================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = NULL;\r
+ stss.duration = 1;\r
+\r
+ for( loop = 0 ; loop < 10000 ; loop++ )\r
+ lfds601_slist_new_head( stss.ss, (void *) 0 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 7. make a list, then one 25% deleter-traverser per CPU\r
+\r
+ printf( "\n"\r
+ "7. make a list, then one 25%% deleter-traverser per CPU\n"\r
+ "======================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 4;\r
+ stss.se = NULL;\r
+ stss.duration = 1;\r
+\r
+ for( loop = 0 ; loop < 10000 ; loop++ )\r
+ lfds601_slist_new_head( stss.ss, (void *) 0 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 8. one head writer and one 100% deleter-traverse per CPU\r
+\r
+ printf( "\n"\r
+ "8. one head writer and one 100%% deleter-traverse per CPU\n"\r
+ "========================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = NULL;\r
+ stss.duration = 10;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_head_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 9. one head writer and one 25% deleter-traverse per CPU\r
+\r
+ printf( "\n"\r
+ "9. one head writer and one 25%% deleter-traverse per CPU\n"\r
+ "=======================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 4;\r
+ stss.se = NULL;\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_head_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 10. make one element, then one after writer and one 100% deleter-traverser per CPU\r
+\r
+ printf( "\n"\r
+ "10. make one element, then one after writer and one 100%% deleter-traverser per CPU\n"\r
+ "==================================================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 1;\r
+ stss.se = lfds601_slist_new_head( stss.ss, (void *) NULL );\r
+ stss.duration = 10;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_after_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 11. make one element, then one after writer and one 25% deleter-traverser per CPU\r
+\r
+ printf( "\n"\r
+ "11. make one element, then one after writer and one 25%% deleter-traverser per CPU\n"\r
+ "=================================================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 4;\r
+ stss.se = lfds601_slist_new_head( stss.ss, (void *) NULL );\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_after_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 12. one head writer, one after writer, one traverser and one 25% deleter-traverser per CPU\r
+\r
+ printf( "\n"\r
+ "12. one head writer, one after writer, one traverser and one 25%% deleter-traverser per CPU\n"\r
+ "==========================================================================================\n" );\r
+\r
+ lfds601_slist_new( &stss.ss, NULL, NULL );\r
+ stss.iteration_modulo = 4;\r
+ stss.se = lfds601_slist_new_head( stss.ss, (void *) NULL );\r
+ stss.duration = 1;\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 4 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_slist_internal_thread_head_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_slist_internal_thread_after_writer, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count*2], loop, lfds601_slist_internal_thread_traverser, &stss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count*3], loop, lfds601_slist_internal_thread_deleter_traverser, &stss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 4 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_slist_delete( stss.ss );\r
+\r
+ free( thread_handles );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_head_writer( void *lfds601_slist_thread_start_state )\r
+{\r
+ struct lfds601_slist_thread_start_state\r
+ *stss;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned long int\r
+ count = 0;\r
+\r
+ assert( lfds601_slist_thread_start_state != NULL );\r
+\r
+ stss = (struct lfds601_slist_thread_start_state *) lfds601_slist_thread_start_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + stss->duration )\r
+ if( lfds601_slist_new_head(stss->ss, (void *) 0) )\r
+ count++;\r
+\r
+ printf( "head writer count = %lu\n", count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_after_writer( void *lfds601_slist_thread_start_state )\r
+{\r
+ struct lfds601_slist_thread_start_state\r
+ *stss;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned long int\r
+ count = 0;\r
+\r
+ assert( lfds601_slist_thread_start_state != NULL );\r
+\r
+ stss = (struct lfds601_slist_thread_start_state *) lfds601_slist_thread_start_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + stss->duration )\r
+ if( lfds601_slist_new_next(stss->se, (void *) 0) )\r
+ count++;\r
+\r
+ printf( "after writer count = %lu\n", count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_traverser( void *lfds601_slist_thread_start_state )\r
+{\r
+ struct lfds601_slist_thread_start_state\r
+ *stss;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned long int\r
+ count = 0,\r
+ iteration = 0;\r
+\r
+ struct lfds601_slist_element\r
+ *se;\r
+\r
+ assert( lfds601_slist_thread_start_state != NULL );\r
+\r
+ stss = (struct lfds601_slist_thread_start_state *) lfds601_slist_thread_start_state;\r
+\r
+ time( &start_time );\r
+\r
+ lfds601_slist_get_head( stss->ss, &se );\r
+\r
+ while( time(NULL) < start_time + stss->duration )\r
+ {\r
+ if( !(iteration % stss->iteration_modulo) )\r
+ {\r
+ lfds601_slist_get_next( se, &se );\r
+ count++;\r
+ }\r
+\r
+ if( se == NULL )\r
+ {\r
+ lfds601_slist_get_head( stss->ss, &se );\r
+ count++;\r
+ }\r
+\r
+ iteration++;\r
+ }\r
+\r
+ printf( "traverser count = %lu\n", count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION lfds601_slist_internal_thread_deleter_traverser( void *lfds601_slist_thread_start_state )\r
+{\r
+ struct lfds601_slist_thread_start_state\r
+ *stss;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned long int\r
+ count = 0,\r
+ iteration = 0;\r
+\r
+ struct lfds601_slist_element\r
+ *se;\r
+\r
+ assert( lfds601_slist_thread_start_state != NULL );\r
+\r
+ stss = (struct lfds601_slist_thread_start_state *) lfds601_slist_thread_start_state;\r
+\r
+ time( &start_time );\r
+\r
+ lfds601_slist_get_head( stss->ss, &se );\r
+\r
+ while( time(NULL) < start_time + stss->duration )\r
+ {\r
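+ // delete only on every iteration_modulo-th iteration: modulo 1 deletes at every element visited, modulo 4 roughly 25% of the time\r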
+ if( se != NULL and !(iteration % stss->iteration_modulo) )\r
+ {\r
+ lfds601_slist_delete_element( stss->ss, se );\r
+ count++;\r
+ }\r
+\r
+ if( se != NULL )\r
+ lfds601_slist_get_next( se, &se );\r
+\r
+ if( se == NULL )\r
+ lfds601_slist_get_head( stss->ss, &se );\r
+\r
+ iteration++;\r
+ }\r
+\r
+ printf( "deleter-traverser count = %lu\n", count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds601_stack( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ struct lfds601_stack_state\r
+ *ss;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ /* TRD : there are 5 tests\r
+\r
+ 1. single reader thread per CPU\r
+ - stack always empty\r
+ 2. single writer thread per CPU\r
+ - stack always full\r
+ 3. one reader and one writer thread per CPU\r
+ - stack balanced\r
+ 4. one reader and two writer threads per CPU\r
+ - stack grows\r
+ 5. two reader and one writer thread per CPU\r
+ - stack tends to empty\r
+ */\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ printf( "\n"\r
+ "Stack Test\n"\r
+ "==========\n" );\r
+\r
+ // TRD : 1. single reader thread per CPU\r
+\r
+ printf( "\n"\r
+ "1. single reader thread per CPU\n"\r
+ "===============================\n" );\r
+\r
+ lfds601_stack_new( &ss, 10000 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_stack_internal_thread_reader, ss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_stack_delete( ss, NULL, NULL );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 2. single writer thread per CPU\r
+\r
+ printf( "\n"\r
+ "2. single writer thread per CPU\n"\r
+ "===============================\n" );\r
+\r
+ lfds601_stack_new( &ss, 10000 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 1 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_stack_internal_thread_writer, ss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_stack_delete( ss, NULL, NULL );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 3. one reader and one writer thread per CPU\r
+\r
+ printf( "\n"\r
+ "3. one reader and one writer thread per CPU\n"\r
+ "===========================================\n" );\r
+\r
+ lfds601_stack_new( &ss, 10000 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_stack_internal_thread_reader, ss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_stack_internal_thread_writer, ss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_stack_delete( ss, NULL, NULL );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 4. one reader and two writer threads per CPU\r
+\r
+ printf( "\n"\r
+ "4. one reader and two writer threads per CPU\n"\r
+ "============================================\n" );\r
+\r
+ lfds601_stack_new( &ss, 10000 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 3 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_stack_internal_thread_reader, ss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_stack_internal_thread_writer, ss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count*2], loop, lfds601_stack_internal_thread_writer, ss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 3 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_stack_delete( ss, NULL, NULL );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : 5. two reader and one writer thread per CPU\r
+\r
+ printf( "\n"\r
+ "5. two reader and one writer thread per CPU\n"\r
+ "===========================================\n" );\r
+\r
+ lfds601_stack_new( &ss, 10000 );\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count * 3 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, lfds601_stack_internal_thread_reader, ss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, lfds601_stack_internal_thread_reader, ss );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count*2], loop, lfds601_stack_internal_thread_writer, ss );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 3 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ lfds601_stack_delete( ss, NULL, NULL );\r
+\r
+ free( thread_handles );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION lfds601_stack_internal_thread_reader( void *lfds601_stack_state )\r
+{\r
+ struct lfds601_stack_state\r
+ *ss;\r
+\r
+ void\r
+ *user_data;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned long long int\r
+ count = 0;\r
+\r
+ assert( lfds601_stack_state != NULL );\r
+\r
+ ss = (struct lfds601_stack_state *) lfds601_stack_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ if( lfds601_stack_pop(ss, &user_data) )\r
+ count++;\r
+ }\r
+\r
+ printf( "read count = %llu\n", count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION lfds601_stack_internal_thread_writer( void *lfds601_stack_state )\r
+{\r
+ struct lfds601_stack_state\r
+ *ss;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned long long int\r
+ count = 0;\r
+\r
+ assert( lfds601_stack_state != NULL );\r
+\r
+ ss = (struct lfds601_stack_state *) lfds601_stack_state;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ // TRD : we don't store any user data\r
+ if( lfds601_stack_push(ss, NULL) )\r
+ count++;\r
+ }\r
+\r
+ printf( "write count = %llu\n", count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcproj", "{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05} = {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds601", "..\liblfds601\liblfds601.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Debug|Win32 = Debug|Win32\r
+ Debug|x64 = Debug|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ Release|Win32 = Release|Win32\r
+ Release|x64 = Release|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.Build.0 = Release|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="test"\r
+ ProjectGUID="{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ RootNamespace="test"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds601\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="4"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds601\inc""\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds601\inc""\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds601\inc""\r
+ ExceptionHandling="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="src"\r
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"\r
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"\r
+ >\r
+ <File\r
+ RelativePath=".\src\abstraction.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_cpu_count.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_start.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_wait.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_stack.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\main.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\misc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\structures.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_abstraction.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_slist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_stack.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+building liblfds
+================
+
+Windows (user-mode)
+===================
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition (or
+ later versions) to load "liblfds.sln". The "Win32" platform is x86;
+ the "x64" platform is x64.
+
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously
+ you'll need to have run setenv.bat or the appropriate vcvars*.bat first;
+ you can build for x64/64-bit and x86/32-bit - just run the correct batch
+ file).
+
+ Targets are "librel", "libdbg", "dllrel", "dlldbg" and "clean". You need
+ to clean between switching targets.
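+
+ For example, from an SDK command prompt set up for your target architecture
+ (a sketch - the GNUmake binary may be named "make" or "gnumake" on your
+ system):
+
+   gnumake -f makefile.windows libdbg
+   gnumake -f makefile.windows clean
+   gnumake -f makefile.windows dllrel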
+
+Windows (kernel)
+================
+Use the Windows Driver Kit "build" command. Prior to running "build",
+if you wish to build a static library, run the batch file
+"runme_before_win_kernel_static_lib_build.bat"; if you wish to
+build a dynamic library, instead run "runme_before_win_kernel_dynamic_lib_build.bat".
+
+The Windows kernel build system is rather limited, and rather than
+mess up the directory/file structure just for the Windows kernel
+platform, I've instead arranged it so that these batch files do the
+necessary work for "build" to work.
+
+The batch files are idempotent; you can run them as often as you
+like, in any order, at any time (before or after builds), and they'll
+do the right thing. You need to clean between switching targets.
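+
+For example, to build a static library (a sketch; "build" accepts whatever
+options your WDK version expects):
+
+  runme_before_win_kernel_static_lib_build.bat
+  build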
+
+Linux
+=====
+Use GNUmake to run "makefile.linux". Targets are "arrel", "ardbg",
+"sorel", "sodbg" and "clean". You need to clean between switching
+targets.
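+
+For example (assuming GNUmake is installed as "make"):
+
+  make -f makefile.linux ardbg
+  make -f makefile.linux clean
+  make -f makefile.linux sorel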
+
+
--- /dev/null
+DIRS = src
+
--- /dev/null
+#ifndef __LIBLFDS610_H
+
+ /***** library header *****/
+ #define LFDS610_RELEASE_NUMBER_STRING "6.1.0"
+
+
+
+
+ /***** lfds610_abstraction *****/
+
+ /***** defines *****/
+ #if (defined _WIN64 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ // TRD : 64-bit Windows user-mode with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <windows.h>
+ #include <intrin.h>
+ typedef unsigned __int64 lfds610_atom_t;
+ #define LFDS610_INLINE __forceinline
+ #define LFDS610_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 8
+ #define LFDS610_ALIGN_DOUBLE_POINTER 16
+ #define LFDS610_BARRIER_COMPILER_LOAD _ReadBarrier()
+ #define LFDS610_BARRIER_COMPILER_STORE _WriteBarrier()
+ #define LFDS610_BARRIER_COMPILER_FULL _ReadWriteBarrier()
+ #define LFDS610_BARRIER_PROCESSOR_LOAD _mm_lfence()
+ #define LFDS610_BARRIER_PROCESSOR_STORE _mm_sfence()
+ #define LFDS610_BARRIER_PROCESSOR_FULL _mm_mfence()
+ #endif
+
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ // TRD : 32-bit Windows user-mode with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <windows.h>
+ #include <intrin.h>
+ typedef unsigned long int lfds610_atom_t;
+ #define LFDS610_INLINE __forceinline
+ #define LFDS610_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 4
+ #define LFDS610_ALIGN_DOUBLE_POINTER 8
+ #define LFDS610_BARRIER_COMPILER_LOAD _ReadBarrier()
+ #define LFDS610_BARRIER_COMPILER_STORE _WriteBarrier()
+ #define LFDS610_BARRIER_COMPILER_FULL _ReadWriteBarrier()
+ #define LFDS610_BARRIER_PROCESSOR_LOAD _mm_lfence()
+ #define LFDS610_BARRIER_PROCESSOR_STORE _mm_sfence()
+ #define LFDS610_BARRIER_PROCESSOR_FULL _mm_mfence()
+
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)
+ #endif
+
+ #if (defined _WIN64 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ // TRD : 64-bit Windows kernel with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <wdm.h>
+ typedef unsigned __int64 lfds610_atom_t;
+ #define LFDS610_INLINE __forceinline
+ #define LFDS610_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 8
+ #define LFDS610_ALIGN_DOUBLE_POINTER 16
+ #define LFDS610_BARRIER_COMPILER_LOAD _ReadBarrier()
+ #define LFDS610_BARRIER_COMPILER_STORE _WriteBarrier()
+ #define LFDS610_BARRIER_COMPILER_FULL _ReadWriteBarrier()
+ #define LFDS610_BARRIER_PROCESSOR_LOAD _mm_lfence()
+ #define LFDS610_BARRIER_PROCESSOR_STORE _mm_sfence()
+ #define LFDS610_BARRIER_PROCESSOR_FULL _mm_mfence()
+ #endif
+
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ // TRD : 32-bit Windows kernel with the Microsoft C compiler, any CPU
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <wdm.h>
+ typedef unsigned long int lfds610_atom_t;
+ #define LFDS610_INLINE __forceinline
+ #define LFDS610_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 4
+ #define LFDS610_ALIGN_DOUBLE_POINTER 8
+ #define LFDS610_BARRIER_COMPILER_LOAD _ReadBarrier()
+ #define LFDS610_BARRIER_COMPILER_STORE _WriteBarrier()
+ #define LFDS610_BARRIER_COMPILER_FULL _ReadWriteBarrier()
+ #define LFDS610_BARRIER_PROCESSOR_LOAD _mm_lfence()
+ #define LFDS610_BARRIER_PROCESSOR_STORE _mm_sfence()
+ #define LFDS610_BARRIER_PROCESSOR_FULL _mm_mfence()
+
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)
+ #endif
+
+ #if (defined __unix__ && defined __x86_64__ && __GNUC__)
+ // TRD : any UNIX with GCC on x64
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long long int lfds610_atom_t;
+ #define LFDS610_INLINE inline
+ #define LFDS610_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 8
+ #define LFDS610_ALIGN_DOUBLE_POINTER 16
+ #define LFDS610_BARRIER_COMPILER_LOAD __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_COMPILER_STORE __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_COMPILER_FULL __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_PROCESSOR_LOAD __sync_synchronize()
+ #define LFDS610_BARRIER_PROCESSOR_STORE __sync_synchronize()
+ #define LFDS610_BARRIER_PROCESSOR_FULL __sync_synchronize()
+ #endif
+
+ #if (defined __unix__ && defined __i686__ && __GNUC__)
+ // TRD : any UNIX with GCC on x86
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long int lfds610_atom_t;
+ #define LFDS610_INLINE inline
+ #define LFDS610_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 4
+ #define LFDS610_ALIGN_DOUBLE_POINTER 8
+ #define LFDS610_BARRIER_COMPILER_LOAD __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_COMPILER_STORE __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_COMPILER_FULL __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_PROCESSOR_LOAD __sync_synchronize()
+ #define LFDS610_BARRIER_PROCESSOR_STORE __sync_synchronize()
+ #define LFDS610_BARRIER_PROCESSOR_FULL __sync_synchronize()
+ #endif
+
+ #if (defined __unix__ && defined __arm__ && __GNUC__)
+ // TRD : any UNIX with GCC on ARM
+ #include <assert.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ typedef unsigned long int lfds610_atom_t;
+ #define LFDS610_INLINE inline
+ #define LFDS610_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS610_ALIGN_SINGLE_POINTER 4
+ #define LFDS610_ALIGN_DOUBLE_POINTER 8
+ #define LFDS610_BARRIER_COMPILER_LOAD __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_COMPILER_STORE __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_COMPILER_FULL __asm__ __volatile__ ( "" : : : "memory" )
+ #define LFDS610_BARRIER_PROCESSOR_LOAD __sync_synchronize()
+ #define LFDS610_BARRIER_PROCESSOR_STORE __sync_synchronize()
+ #define LFDS610_BARRIER_PROCESSOR_FULL __sync_synchronize()
+ #endif
+
+ #define LFDS610_BARRIER_LOAD LFDS610_BARRIER_COMPILER_LOAD; LFDS610_BARRIER_PROCESSOR_LOAD; LFDS610_BARRIER_COMPILER_LOAD
+ #define LFDS610_BARRIER_STORE LFDS610_BARRIER_COMPILER_STORE; LFDS610_BARRIER_PROCESSOR_STORE; LFDS610_BARRIER_COMPILER_STORE
+ #define LFDS610_BARRIER_FULL LFDS610_BARRIER_COMPILER_FULL; LFDS610_BARRIER_PROCESSOR_FULL; LFDS610_BARRIER_COMPILER_FULL
+
+ /***** enums *****/
+ enum lfds610_data_structure_validity
+ {
+ LFDS610_VALIDITY_VALID,
+ LFDS610_VALIDITY_INVALID_LOOP,
+ LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS,
+ LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS,
+ LFDS610_VALIDITY_INVALID_TEST_DATA
+ };
+
+ /***** structs *****/
+ struct lfds610_validation_info
+ {
+ lfds610_atom_t
+ min_elements,
+ max_elements;
+ };
+
+ /***** public prototypes *****/
+ void *lfds610_abstraction_malloc( size_t size );
+ void lfds610_abstraction_free( void *memory );
+
+
+
+
+
+ /***** lfds610_freelist *****/
+
+ /***** enums *****/
+ enum lfds610_freelist_query_type
+ {
+ LFDS610_FREELIST_QUERY_ELEMENT_COUNT,
+ LFDS610_FREELIST_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds610_freelist_state;
+ struct lfds610_freelist_element;
+
+ /***** public prototypes *****/
+ int lfds610_freelist_new( struct lfds610_freelist_state **fs, lfds610_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );
+ void lfds610_freelist_use( struct lfds610_freelist_state *fs );
+ void lfds610_freelist_delete( struct lfds610_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ lfds610_atom_t lfds610_freelist_new_elements( struct lfds610_freelist_state *fs, lfds610_atom_t number_elements );
+
+ struct lfds610_freelist_element *lfds610_freelist_pop( struct lfds610_freelist_state *fs, struct lfds610_freelist_element **fe );
+ struct lfds610_freelist_element *lfds610_freelist_guaranteed_pop( struct lfds610_freelist_state *fs, struct lfds610_freelist_element **fe );
+ void lfds610_freelist_push( struct lfds610_freelist_state *fs, struct lfds610_freelist_element *fe );
+
+ void *lfds610_freelist_get_user_data_from_element( struct lfds610_freelist_element *fe, void **user_data );
+ void lfds610_freelist_set_user_data_in_element( struct lfds610_freelist_element *fe, void *user_data );
+
+ void lfds610_freelist_query( struct lfds610_freelist_state *fs, enum lfds610_freelist_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ /***** lfds610_liblfds *****/
+
+ /***** public prototypes *****/
+ void lfds610_liblfds_abstraction_test_helper_increment_non_atomic( lfds610_atom_t *shared_counter );
+ void lfds610_liblfds_abstraction_test_helper_increment_atomic( volatile lfds610_atom_t *shared_counter );
+ void lfds610_liblfds_abstraction_test_helper_cas( volatile lfds610_atom_t *shared_counter, lfds610_atom_t *local_counter );
+ void lfds610_liblfds_abstraction_test_helper_dcas( volatile lfds610_atom_t *shared_counter, lfds610_atom_t *local_counter );
+
+
+
+
+
+ /***** lfds610_queue *****/
+
+ /***** enums *****/
+ enum lfds610_queue_query_type
+ {
+ LFDS610_QUEUE_QUERY_ELEMENT_COUNT,
+ LFDS610_QUEUE_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds610_queue_state;
+
+ /***** public prototypes *****/
+ int lfds610_queue_new( struct lfds610_queue_state **sq, lfds610_atom_t number_elements );
+ void lfds610_queue_use( struct lfds610_queue_state *qs );
+ void lfds610_queue_delete( struct lfds610_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ int lfds610_queue_enqueue( struct lfds610_queue_state *qs, void *user_data );
+ int lfds610_queue_guaranteed_enqueue( struct lfds610_queue_state *qs, void *user_data );
+ int lfds610_queue_dequeue( struct lfds610_queue_state *qs, void **user_data );
+
+ void lfds610_queue_query( struct lfds610_queue_state *qs, enum lfds610_queue_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ /***** lfds610_ringbuffer *****/
+
+ /***** enums *****/
+ enum lfds610_ringbuffer_query_type
+ {
+ LFDS610_RINGBUFFER_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds610_ringbuffer_state;
+
+ /***** public prototypes *****/
+ int lfds610_ringbuffer_new( struct lfds610_ringbuffer_state **rs, lfds610_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );
+ void lfds610_ringbuffer_use( struct lfds610_ringbuffer_state *rs );
+ void lfds610_ringbuffer_delete( struct lfds610_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ struct lfds610_freelist_element *lfds610_ringbuffer_get_read_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element **fe );
+ struct lfds610_freelist_element *lfds610_ringbuffer_get_write_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element **fe, int *overwrite_flag );
+
+ void lfds610_ringbuffer_put_read_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element *fe );
+ void lfds610_ringbuffer_put_write_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element *fe );
+
+ void lfds610_ringbuffer_query( struct lfds610_ringbuffer_state *rs, enum lfds610_ringbuffer_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ /***** lfds610_slist *****/
+
+ /***** incomplete types *****/
+ struct lfds610_slist_state;
+ struct lfds610_slist_element;
+
+ /***** public prototypes *****/
+ int lfds610_slist_new( struct lfds610_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+ void lfds610_slist_use( struct lfds610_slist_state *ss );
+ void lfds610_slist_delete( struct lfds610_slist_state *ss );
+
+ struct lfds610_slist_element *lfds610_slist_new_head( struct lfds610_slist_state *ss, void *user_data );
+ struct lfds610_slist_element *lfds610_slist_new_next( struct lfds610_slist_element *se, void *user_data );
+
+ int lfds610_slist_logically_delete_element( struct lfds610_slist_state *ss, struct lfds610_slist_element *se );
+ void lfds610_slist_single_threaded_physically_delete_all_elements( struct lfds610_slist_state *ss );
+
+ int lfds610_slist_get_user_data_from_element( struct lfds610_slist_element *se, void **user_data );
+ int lfds610_slist_set_user_data_in_element( struct lfds610_slist_element *se, void *user_data );
+
+ struct lfds610_slist_element *lfds610_slist_get_head( struct lfds610_slist_state *ss, struct lfds610_slist_element **se );
+ struct lfds610_slist_element *lfds610_slist_get_next( struct lfds610_slist_element *se, struct lfds610_slist_element **next_se );
+ struct lfds610_slist_element *lfds610_slist_get_head_and_then_next( struct lfds610_slist_state *ss, struct lfds610_slist_element **se );
+
+
+
+
+
+ /***** lfds610_stack *****/
+
+ /***** enums *****/
+ enum lfds610_stack_query_type
+ {
+ LFDS610_STACK_QUERY_ELEMENT_COUNT,
+ LFDS610_STACK_QUERY_VALIDATE
+ };
+
+ /***** incomplete types *****/
+ struct lfds610_stack_state;
+
+ /***** public prototypes *****/
+ int lfds610_stack_new( struct lfds610_stack_state **ss, lfds610_atom_t number_elements );
+ void lfds610_stack_use( struct lfds610_stack_state *ss );
+ void lfds610_stack_delete( struct lfds610_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+ void lfds610_stack_clear( struct lfds610_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state );
+
+ int lfds610_stack_push( struct lfds610_stack_state *ss, void *user_data );
+ int lfds610_stack_guaranteed_push( struct lfds610_stack_state *ss, void *user_data );
+ int lfds610_stack_pop( struct lfds610_stack_state *ss, void **user_data );
+
+ void lfds610_stack_query( struct lfds610_stack_state *ss, enum lfds610_stack_query_type query_type, void *query_input, void *query_output );
+
+
+
+
+
+ #define __LIBLFDS610_H
+
+#endif
+
--- /dev/null
+EXPORTS
+
+lfds610_liblfds_abstraction_test_helper_increment_non_atomic = lfds610_liblfds_abstraction_test_helper_increment_non_atomic @1
+lfds610_liblfds_abstraction_test_helper_increment_atomic = lfds610_liblfds_abstraction_test_helper_increment_atomic @2
+lfds610_liblfds_abstraction_test_helper_cas = lfds610_liblfds_abstraction_test_helper_cas @3
+lfds610_liblfds_abstraction_test_helper_dcas = lfds610_liblfds_abstraction_test_helper_dcas @4
+
+lfds610_freelist_delete = lfds610_freelist_delete @5
+lfds610_freelist_get_user_data_from_element = lfds610_freelist_get_user_data_from_element @6
+lfds610_freelist_guaranteed_pop = lfds610_freelist_guaranteed_pop @7
+lfds610_freelist_new = lfds610_freelist_new @8
+lfds610_freelist_new_elements = lfds610_freelist_new_elements @9
+lfds610_freelist_pop = lfds610_freelist_pop @10
+lfds610_freelist_push = lfds610_freelist_push @11
+lfds610_freelist_query = lfds610_freelist_query @12
+lfds610_freelist_set_user_data_in_element = lfds610_freelist_set_user_data_in_element @13
+lfds610_freelist_use = lfds610_freelist_use @14
+
+lfds610_queue_delete = lfds610_queue_delete @15
+lfds610_queue_dequeue = lfds610_queue_dequeue @16
+lfds610_queue_enqueue = lfds610_queue_enqueue @17
+lfds610_queue_guaranteed_enqueue = lfds610_queue_guaranteed_enqueue @18
+lfds610_queue_new = lfds610_queue_new @19
+lfds610_queue_query = lfds610_queue_query @20
+lfds610_queue_use = lfds610_queue_use @21
+
+lfds610_ringbuffer_delete = lfds610_ringbuffer_delete @22
+lfds610_ringbuffer_get_read_element = lfds610_ringbuffer_get_read_element @23
+lfds610_ringbuffer_get_write_element = lfds610_ringbuffer_get_write_element @24
+lfds610_ringbuffer_new = lfds610_ringbuffer_new @25
+lfds610_ringbuffer_put_read_element = lfds610_ringbuffer_put_read_element @26
+lfds610_ringbuffer_put_write_element = lfds610_ringbuffer_put_write_element @27
+lfds610_ringbuffer_query = lfds610_ringbuffer_query @28
+lfds610_ringbuffer_use = lfds610_ringbuffer_use @29
+
+lfds610_slist_delete = lfds610_slist_delete @30
+lfds610_slist_get_head = lfds610_slist_get_head @31
+lfds610_slist_get_head_and_then_next = lfds610_slist_get_head_and_then_next @32
+lfds610_slist_get_next = lfds610_slist_get_next @33
+lfds610_slist_get_user_data_from_element = lfds610_slist_get_user_data_from_element @34
+lfds610_slist_logically_delete_element = lfds610_slist_logically_delete_element @35
+lfds610_slist_new = lfds610_slist_new @36
+lfds610_slist_new_head = lfds610_slist_new_head @37
+lfds610_slist_new_next = lfds610_slist_new_next @38
+lfds610_slist_set_user_data_in_element = lfds610_slist_set_user_data_in_element @39
+lfds610_slist_single_threaded_physically_delete_all_elements = lfds610_slist_single_threaded_physically_delete_all_elements @40
+lfds610_slist_use = lfds610_slist_use @41
+
+lfds610_stack_clear = lfds610_stack_clear @42
+lfds610_stack_delete = lfds610_stack_delete @43
+lfds610_stack_guaranteed_push = lfds610_stack_guaranteed_push @44
+lfds610_stack_new = lfds610_stack_new @45
+lfds610_stack_pop = lfds610_stack_pop @46
+lfds610_stack_push = lfds610_stack_push @47
+lfds610_stack_query = lfds610_stack_query @48
+lfds610_stack_use = lfds610_stack_use @49
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds610", "liblfds610.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="liblfds610"\r
+ ProjectGUID="{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+ RootNamespace="liblfds"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds610.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds610.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds610.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds610.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="inc"\r
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"\r
+ UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"\r
+ >\r
+ <File\r
+ RelativePath=".\inc\liblfds610.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="src"\r
+ >\r
+ <File\r
+ RelativePath=".\src\liblfds610_internal.h"\r
+ >\r
+ </File>\r
+ <Filter\r
+ Name="lfds610_abstraction"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_abstraction\lfds610_abstraction_free.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_abstraction\lfds610_abstraction_internal_body.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_abstraction\lfds610_abstraction_internal_wrapper.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_abstraction\lfds610_abstraction_malloc.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds610_freelist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_freelist\lfds610_freelist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_freelist\lfds610_freelist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_freelist\lfds610_freelist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_freelist\lfds610_freelist_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_freelist\lfds610_freelist_pop_push.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_freelist\lfds610_freelist_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds610_queue"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_queue\lfds610_queue_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_queue\lfds610_queue_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_queue\lfds610_queue_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_queue\lfds610_queue_query.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_queue\lfds610_queue_queue.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds610_ringbuffer"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_ringbuffer\lfds610_ringbuffer_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_ringbuffer\lfds610_ringbuffer_get_and_put.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_ringbuffer\lfds610_ringbuffer_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_ringbuffer\lfds610_ringbuffer_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_ringbuffer\lfds610_ringbuffer_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds610_slist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_slist\lfds610_slist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_slist\lfds610_slist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_slist\lfds610_slist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_slist\lfds610_slist_link.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_slist\lfds610_slist_new.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds610_stack"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_stack\lfds610_stack_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_stack\lfds610_stack_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_stack\lfds610_stack_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_stack\lfds610_stack_push_pop.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_stack\lfds610_stack_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds610_liblfds"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds610_liblfds\lfds610_liblfds_abstraction_test_helpers.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_liblfds\lfds610_liblfds_aligned_free.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_liblfds\lfds610_liblfds_aligned_malloc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds610_liblfds\lfds610_liblfds_internal.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = inc
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>/dev/null 2>/dev/null
+
+##### sources, objects and libraries #####
+BINNAME = liblfds610
+AR_BINARY = $(BINDIR)/$(BINNAME).a
+SO_BINARY = $(BINDIR)/$(BINNAME).so
+SRCDIRS = lfds610_abstraction lfds610_freelist lfds610_liblfds lfds610_queue lfds610_ringbuffer lfds610_slist lfds610_stack
+# TRD : be aware - in the linux makefile, with the one-pass linking behaviour of the GNU linker, the order
+# of source files matters! this is because it leads to the ordering of objects in the library and
+# that in turn, since the data structures all use the freelist API and the abstraction API, has to be
+# correct
+# TRD : lfds610_abstraction_cas.c lfds610_abstraction_dcas.c lfds610_abstraction_increment.c are inlined and are compiled by every C file
+SOURCES = lfds610_queue_delete.c lfds610_queue_new.c lfds610_queue_query.c lfds610_queue_queue.c \
+ lfds610_ringbuffer_delete.c lfds610_ringbuffer_get_and_put.c lfds610_ringbuffer_new.c lfds610_ringbuffer_query.c \
+ lfds610_slist_delete.c lfds610_slist_get_and_set.c lfds610_slist_link.c lfds610_slist_new.c \
+ lfds610_stack_delete.c lfds610_stack_new.c lfds610_stack_push_pop.c lfds610_stack_query.c \
+ lfds610_freelist_delete.c lfds610_freelist_get_and_set.c lfds610_freelist_new.c lfds610_freelist_query.c lfds610_freelist_pop_push.c \
+ lfds610_liblfds_abstraction_test_helpers.c lfds610_liblfds_aligned_free.c lfds610_liblfds_aligned_malloc.c \
+ lfds610_abstraction_free.c lfds610_abstraction_malloc.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+
+##### CPU variants #####
+UNAME = $(shell uname -m)
+GCCARCH = -march=$(UNAME)
+
+ifeq ($(UNAME),x86_64)
+ GCCARCH = -march=core2
+endif
+
+ifeq ($(findstring arm,$(UNAME)),arm)
+ GCCARCH = -march=armv6k -marm
+endif
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+DG = gcc
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)"
+
+CC = gcc
+CBASE = -Wall -Wno-unknown-pragmas -std=c99 $(GCCARCH) -pthread -c -I"$(SRCDIR)" -I"$(INCDIR)"
+CFREL = -O2 -finline-functions -Wno-strict-aliasing
+CFDBG = -O0 -g
+
+AR = ar
+AFLAGS = -rcs
+
+LD = gcc
+LFBASE = -Wall -std=c99 -shared
+LFREL = -O2 -s
+LFDBG = -O0 -g
+
+##### rel/dbg .a/.so variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CBASE := $(CBASE) -fpic
+endif
+
+CFLAGS = $(CBASE) $(CFDBG)
+LFLAGS = $(LFBASE) $(LFDBG)
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS = $(CBASE) $(CFREL)
+ LFLAGS = $(LFBASE) $(LFREL)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS) -o $@ $<
+
+##### explicit rules #####
+$(AR_BINARY) : $(OBJECTS)
+ $(AR) $(AFLAGS) $(AR_BINARY) $(OBJECTS)
+
+$(SO_BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(SYSLIBS) $(OBJECTS) -o $(SO_BINARY)
+
+##### phony #####
+.PHONY : clean arrel ardbg sorel sodbg
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME).* $(OBJDIR)/*.o $(OBJDIR)/*.d
+
+arrel : $(AR_BINARY)
+ardbg : $(AR_BINARY)
+
+sorel : $(SO_BINARY)
+sodbg : $(SO_BINARY)
+
+##### dependencies #####
+# the dependency files are emitted into $(OBJDIR) by the dependency-generation rule above
+DEPENDS = $(patsubst %.o,%.d,$(OBJECTS))
+-include $(DEPENDS)
+
+##### notes #####
+# TRD : we use -std=c99 purely to permit C++ style comments
+
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = inc
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>nul 2>nul
+
+##### sources, objects and libraries #####
+BINNAME = liblfds610
+LIB_BINARY = $(BINDIR)\$(BINNAME).lib
+DLL_BINARY = $(BINDIR)\$(BINNAME).dll
+SRCDIRS = lfds610_abstraction lfds610_freelist lfds610_liblfds lfds610_queue lfds610_ringbuffer lfds610_slist lfds610_stack
+# TRD : lfds610_abstraction_cas.c lfds610_abstraction_dcas.c lfds610_abstraction_increment.c are inlined and are compiled by every C file
+SOURCES = lfds610_abstraction_free.c lfds610_abstraction_malloc.c \
+ lfds610_freelist_delete.c lfds610_freelist_get_and_set.c lfds610_freelist_new.c lfds610_freelist_query.c lfds610_freelist_pop_push.c \
+ lfds610_liblfds_abstraction_test_helpers.c lfds610_liblfds_aligned_free.c lfds610_liblfds_aligned_malloc.c \
+ lfds610_queue_delete.c lfds610_queue_new.c lfds610_queue_query.c lfds610_queue_queue.c \
+ lfds610_ringbuffer_delete.c lfds610_ringbuffer_get_and_put.c lfds610_ringbuffer_new.c lfds610_ringbuffer_query.c \
+ lfds610_slist_delete.c lfds610_slist_get_and_set.c lfds610_slist_link.c lfds610_slist_new.c \
+ lfds610_stack_delete.c lfds610_stack_new.c lfds610_stack_push_pop.c lfds610_stack_query.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS = kernel32.lib
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+CC = cl
+CBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /DUNICODE /D_UNICODE /DWIN32_LEAN_AND_MEAN
+CFREL = /Ox /DNDEBUG
+CFDBG = /Od /Gm /Zi /D_DEBUG
+
+AR = lib
+AFLAGS = /nologo /subsystem:console /wx /verbose
+
+LD = link
+LFBASE = /dll /def:$(BINNAME).def /nologo /subsystem:console /wx /nodefaultlib /nxcompat
+LFREL = /incremental:no
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+
+##### variants #####
+CFLAGS = $(CBASE) $(CFDBG) /MTd
+LFLAGS = $(LFBASE) $(LFDBG)
+CLIB = libcmtd.lib
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS = $(CBASE) $(CFREL) /MT
+ LFLAGS = $(LFBASE) $(LFREL)
+ CLIB = libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS = $(CBASE) $(CFDBG) /MTd
+ LFLAGS = $(LFBASE) $(LFDBG)
+ CLIB = libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS = $(CBASE) $(CFREL) /MD
+ LFLAGS = $(LFBASE) $(LFREL)
+ CLIB = msvcrt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS = $(CBASE) $(CFDBG) /MDd
+ LFLAGS = $(LFBASE) $(LFDBG)
+ CLIB = msvcrtd.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(AFLAGS) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+librel : $(LIB_BINARY)
+libdbg : $(LIB_BINARY)
+
+dllrel : $(DLL_BINARY)
+dlldbg : $(DLL_BINARY)
+
--- /dev/null
+introduction
+============
+Welcome to liblfds, a portable, license-free, lock-free data structure library
+written in C.
+
+supported platforms
+===================
+Out-of-the-box ports are provided for;
+
+Operating System  CPU            Toolchain Choices
+================  =============  =================
+Windows 64-bit    x64            1. Microsoft Visual Studio
+                                 2. Microsoft Windows SDK and GNUmake
+
+Windows 32-bit    x64, x86       1. Microsoft Visual Studio
+                                 2. Visual C++ Express Edition
+                                 3. Microsoft Windows SDK and GNUmake
+
+Windows Kernel    x64, x86       1. Windows Driver Kit
+
+Linux 64-bit      x64            1. GCC and GNUmake
+
+Linux 32-bit      x64, x86, ARM  1. GCC and GNUmake
+
+For more information including version requirements, see the building guide (lfds).
+
+data structures
+===============
+This release of liblfds provides the following;
+
+ * Freelist
+ * Queue
+ * Ringbuffer (each element read by a single reader)
+ * Singly-linked list (logical delete only)
+ * Stack
+
+These are all many-readers, many-writers.
+
+liblfds on-line
+===============
+On the liblfds home page, you will find the blog, a bugzilla, a forum, a
+mediawiki and the current and all historical releases.
+
+The mediawiki contains comprehensive documentation for development, building,
+testing and porting.
+
+http://www.liblfds.org
+
+license
+=======
+There is no license. You are free to use this code in any way.
+
+using
+=====
+Once built, there is a single header file, /inc/liblfds610.h, which you must
+include in your source code, and a single library file, /bin/liblfds610.*,
+where the suffix depends on your platform and your build choice (static or
+dynamic). If you built statically, link against the library directly; if you
+built dynamically, arrange your system so that the loader can find the
+library at run-time.
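+
+A minimal sketch of the resulting workflow, using the stack API from
+liblfds610.h (the return value convention shown - non-zero on success - is an
+assumption; see the mediawiki API documentation for the definitive behaviour):
+
+  #include <stdio.h>
+  #include "liblfds610.h"
+
+  int main( void )
+  {
+    struct lfds610_stack_state *ss;
+    void *user_data;
+
+    if( lfds610_stack_new(&ss, 10) )
+    {
+      // user data is an opaque void pointer; here we store a small integer in it
+      lfds610_stack_push( ss, (void *) (size_t) 5 );
+
+      if( lfds610_stack_pop(ss, &user_data) )
+        printf( "popped %d\n", (int) (size_t) user_data );
+
+      // no per-element cleanup function is needed for this example
+      lfds610_stack_delete( ss, NULL, NULL );
+    }
+
+    return( 0 );
+  }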
+
+testing
+=======
+The library comes with a command line test and benchmark program. This program
+requires threads. As such, it is only suitable for platforms which can execute
+a command line binary and provide thread support. Currently this means the test
+and benchmark program works for all platforms except the Windows Kernel.
+
+For documentation, see the testing and benchmarking guide in the mediawiki.
+
+porting
+=======
+Both the test program and liblfds provide an abstraction layer which acts to
+mask platform differences. Porting is the act of implementing on your platform
+the functions which make up the abstraction layers. You do not need to port
+the test program to port liblfds, but obviously it is recommended, so you can
+test your port.
+
+To support liblfds, your platform MUST support;
+
+ * atomic single-word* increment
+ * atomic single-word compare-and-swap
+ * atomic contiguous double-word compare-and-swap**
+ * malloc and free
+ * compiler directive for alignment of variables declared on the stack
+ * compiler directives for compiler barriers and processor barriers
+
+*  A "word" here means a type equal in length to the platform pointer size.
+** This requirement excludes the Alpha, IA64, MIPS, PowerPC and SPARC platforms.
+
+Also, your platform MAY support;
+
+ * compiler keyword for function inlining
+
+To support the test programme, your platform MUST support;
+
+ * determining the number of logical cores
+ * threads (starting and waiting on for completion)
+
+For documentation, see the porting guide (lfds) in the mediawiki.
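+
+As an illustration of the first two MUST requirements above, a sketch of
+atomic increment and single-word compare-and-swap using GCC-style __sync
+builtins (the "example_" function names are hypothetical; the real abstraction
+layer implementations live under /src/lfds610_abstraction/ and vary by
+platform and compiler):
+
+  static inline lfds610_atom_t example_abstraction_increment( volatile lfds610_atom_t *value )
+  {
+    // atomically add one and return the new value
+    return( __sync_add_and_fetch(value, 1) );
+  }
+
+  static inline lfds610_atom_t example_abstraction_cas( volatile lfds610_atom_t *destination,
+                                                        lfds610_atom_t exchange, lfds610_atom_t compare )
+  {
+    // atomically: if *destination equals compare, store exchange; return the original value
+    return( __sync_val_compare_and_swap(destination, compare, exchange) );
+  }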
+
+release history
+===============
+release 1, 25th September 2009, svn revision 1574.
+ - initial release
+
+release 2, 5th October 2009, svn revision 1599.
+ - added abstraction layer for Windows kernel
+ - minor code tidyups/fixes
+
+release 3, 25th October 2009, svn revision 1652.
+ - added singly linked list (logical delete only)
+ - minor code tidyups/fixes
+
+release 4, 7th December 2009, svn revision 1716.
+ - added ARM support
+ - added benchmarking functionality to the test program
+ - fixed a profound and pervasive pointer
+ declaration bug; earlier releases of liblfds
+ *should not be used*
+
+release 5, 19th December 2009, svn revision 1738.
+ - fixed subtle queue bug, which also affected ringbuffer
+ and caused data re-ordering under high load
+ - added benchmarks for freelist, ringbuffer and stack
+
+release 6, 29th December 2009, svn revision 1746.
+ - fixed two implementation errors, which reduced performance,
+ spotted by Codeplug from "http://cboard.cprogramming.com".
+
+release 6.0.0, 18th December 2012, svn revision 2537.
+ - introduction of namespaces, e.g. the "lfds600_" prefix
+ code otherwise COMPLETELY AND WHOLLY UNCHANGED
+ this release is a stepping-stone to 6.1.0
+
+release 6.1.0, 31st December 2012, svn revision 2600.
+ - fixed all existing non-enhancement bugs
+ - discovered some new bugs and fixed them too
+ - a very few minor changes/enhancements
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!).
+
+To deal with these limitations, it is necessary for a Windows kernel
+build to run a batch file prior to building.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+They are both idempotent; you can run them as often as you like and
+switch between them as often as you want. Whichever one you run, it
+will take you from whatever state you were previously in to the state
+you want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file will then copy "/sources.static" into
+"/src/single_dir_for_windows_kernel/", which will cause a static
+library to be built.
+
+The dynamic library batch file will then copy "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic
+library to be built. It will also copy "/src/driver_entry.c" into
+"/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even
+though it's not used.
+
+
--- /dev/null
+@echo off
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_liblfds\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y sources.dynamic src\single_dir_for_windows_kernel\sources 1>nul 2>nul
+copy /y src\driver_entry.c src\single_dir_for_windows_kernel 1>nul 2>nul
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_liblfds\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y src\lfds610_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul
+copy /y sources.static src\single_dir_for_windows_kernel\sources 1>nul 2>nul
+erase /f src\single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4
+DLLDEF = ../../liblfds610.def
+TARGETNAME = liblfds610
+TARGETPATH = ../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /DWIN_KERNEL_BUILD
+
+INCLUDES = ..;../../inc/
+SOURCES = lfds610_abstraction_free.c \
+ lfds610_abstraction_malloc.c \
+ lfds610_freelist_delete.c \
+ lfds610_freelist_get_and_set.c \
+ lfds610_freelist_new.c \
+ lfds610_freelist_pop_push.c \
+ lfds610_freelist_query.c \
+ lfds610_liblfds_abstraction_test_helpers.c \
+ lfds610_liblfds_aligned_free.c \
+ lfds610_liblfds_aligned_malloc.c \
+ lfds610_queue_delete.c \
+ lfds610_queue_new.c \
+ lfds610_queue_query.c \
+ lfds610_queue_queue.c \
+ lfds610_ringbuffer_delete.c \
+ lfds610_ringbuffer_get_and_put.c \
+ lfds610_ringbuffer_new.c \
+ lfds610_ringbuffer_query.c \
+ lfds610_slist_delete.c \
+ lfds610_slist_get_and_set.c \
+ lfds610_slist_link.c \
+ lfds610_slist_new.c \
+ lfds610_stack_delete.c \
+ lfds610_stack_new.c \
+ lfds610_stack_push_pop.c \
+ lfds610_stack_query.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4
+TARGETNAME = liblfds610
+TARGETPATH = ../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /DWIN_KERNEL_BUILD
+
+INCLUDES = ..;../../inc/
+SOURCES = lfds610_abstraction_free.c \
+ lfds610_abstraction_malloc.c \
+ lfds610_freelist_delete.c \
+ lfds610_freelist_get_and_set.c \
+ lfds610_freelist_new.c \
+ lfds610_freelist_pop_push.c \
+ lfds610_freelist_query.c \
+ lfds610_liblfds_abstraction_test_helpers.c \
+ lfds610_liblfds_aligned_free.c \
+ lfds610_liblfds_aligned_malloc.c \
+ lfds610_queue_delete.c \
+ lfds610_queue_new.c \
+ lfds610_queue_query.c \
+ lfds610_queue_queue.c \
+ lfds610_ringbuffer_delete.c \
+ lfds610_ringbuffer_get_and_put.c \
+ lfds610_ringbuffer_new.c \
+ lfds610_ringbuffer_query.c \
+ lfds610_slist_delete.c \
+ lfds610_slist_get_and_set.c \
+ lfds610_slist_link.c \
+ lfds610_slist_new.c \
+ lfds610_stack_delete.c \
+ lfds610_stack_new.c \
+ lfds610_stack_push_pop.c \
+ lfds610_stack_query.c
+
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "liblfds610_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return( STATUS_SUCCESS );
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+This C file (driver_entry.c) is used when building a dynamic library for
+the Windows kernel. It exists to work around one of the limitations of
+that build environment. It is not used by any other build; just ignore it.
+
--- /dev/null
+#include "lfds610_abstraction_internal_body.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 64 bit and 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_cas( volatile lfds610_atom_t *destination, lfds610_atom_t exchange, lfds610_atom_t compare )
+ {
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+ return( (lfds610_atom_t) _InterlockedCompareExchangePointer((void * volatile *) destination, (void *) exchange, (void *) compare) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+ #if (defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on any CPU with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_cas( volatile lfds610_atom_t *destination, lfds610_atom_t exchange, lfds610_atom_t compare )
+ {
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+ // TRD : note the different argument order for the GCC intrinsic compared to the MSVC intrinsic
+
+ return( (lfds610_atom_t) __sync_val_compare_and_swap(destination, compare, exchange) );
+ }
+
+#endif
+
--- /dev/null
+#include "lfds610_abstraction_internal_body.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN64 && defined _MSC_VER)
+
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN64 indicates 64 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS610_INLINE unsigned char lfds610_abstraction_dcas( volatile lfds610_atom_t *destination, lfds610_atom_t *exchange, lfds610_atom_t *compare )
+ {
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ return( _InterlockedCompareExchange128((volatile __int64 *) destination, (__int64) *(exchange+1), (__int64) *exchange, (__int64 *) compare) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS610_INLINE unsigned char lfds610_abstraction_dcas( volatile lfds610_atom_t *destination, lfds610_atom_t *exchange, lfds610_atom_t *compare )
+ {
+ __int64
+ original_compare;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ *(__int64 *) &original_compare = *(__int64 *) compare;
+
+ *(__int64 *) compare = _InterlockedCompareExchange64( (volatile __int64 *) destination, *(__int64 *) exchange, *(__int64 *) compare );
+
+ return( (unsigned char) (*(__int64 *) compare == *(__int64 *) &original_compare) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __x86_64__ && defined __GNUC__)
+
+ /* TRD : any OS on x64 with GCC
+
+ __x86_64__ indicates x64
+ __GNUC__ indicates GCC
+ */
+
+ static LFDS610_INLINE unsigned char lfds610_abstraction_dcas( volatile lfds610_atom_t *destination, lfds610_atom_t *exchange, lfds610_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ __asm__ __volatile__
+ (
+ "lock;" // make cmpxchg16b atomic
+ "cmpxchg16b %0;" // cmpxchg16b sets ZF on success
+ "setz %3;" // if ZF set, set cas_result to 1
+
+ // output
+ : "+m" (*(volatile lfds610_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)
+
+ // input
+ : "b" (*exchange), "c" (*(exchange+1))
+
+ // clobbered
+ : "cc", "memory"
+ );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+ #if ((defined __i686__ || defined __arm__) && defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on x86 or ARM with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ static LFDS610_INLINE unsigned char lfds610_abstraction_dcas( volatile lfds610_atom_t *destination, lfds610_atom_t *exchange, lfds610_atom_t *compare )
+ {
+ unsigned char
+ cas_result = 0;
+
+ unsigned long long int
+ original_destination;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ original_destination = __sync_val_compare_and_swap( (volatile unsigned long long int *) destination, *(unsigned long long int *) compare, *(unsigned long long int *) exchange );
+
+ if( original_destination == *(unsigned long long int *) compare )
+ cas_result = 1;
+
+ *(unsigned long long int *) compare = original_destination;
+
+ return( cas_result );
+ }
+
+#endif
+
+
--- /dev/null
+#include "lfds610_abstraction_internal_wrapper.h"
+
+
+
+
+
+/****************************************************************************/
+#if (!defined WIN_KERNEL_BUILD)
+
+ /* TRD : any OS except Windows kernel on any CPU with any compiler
+
+ !WIN_KERNEL_BUILD indicates not Windows kernel
+ */
+
+ void lfds610_abstraction_free( void *memory )
+ {
+ free( memory );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler
+
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ void lfds610_abstraction_free( void *memory )
+ {
+ ExFreePoolWithTag( memory, 'sdfl' );
+
+ return;
+ }
+
+#endif
+
--- /dev/null
+#include "lfds610_abstraction_internal_body.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN64 && defined _MSC_VER)
+
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN64 indicates 64 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_increment( volatile lfds610_atom_t *value )
+ {
+ assert( value != NULL );
+
+ return( (lfds610_atom_t) _InterlockedIncrement64((__int64 *) value) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_increment( volatile lfds610_atom_t *value )
+ {
+ assert( value != NULL );
+
+ return( (lfds610_atom_t) _InterlockedIncrement((long int *) value) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+ #if (defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on any CPU with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_increment( volatile lfds610_atom_t *value )
+ {
+ assert( value != NULL );
+
+ // TRD : no need for casting here, GCC has a __sync_add_and_fetch() for all native types
+
+ return( (lfds610_atom_t) __sync_add_and_fetch(value, 1) );
+ }
+
+#endif
+
--- /dev/null
+/***** private prototypes *****/
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+/***** the internal header body *****/
+#include "lfds610_abstraction_internal_body.h"
+
--- /dev/null
+#include "lfds610_abstraction_internal_wrapper.h"
+
+
+
+
+
+/****************************************************************************/
+#if (!defined WIN_KERNEL_BUILD)
+
+ /* TRD : any OS except Windows kernel on any CPU with any compiler
+
+ !WIN_KERNEL_BUILD indicates not Windows kernel
+ */
+
+ void *lfds610_abstraction_malloc( size_t size )
+ {
+ return( malloc(size) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler
+
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ void *lfds610_abstraction_malloc( size_t size )
+ {
+ return( ExAllocatePoolWithTag(NonPagedPool, size, 'sdfl') );
+ }
+
+#endif
+
--- /dev/null
+#include "lfds610_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_freelist_delete( struct lfds610_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ struct lfds610_freelist_element
+ *fe;
+
+ void
+ *user_data;
+
+ assert( fs != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ // TRD : leading load barrier not required as it will be performed by the pop
+
+ while( lfds610_freelist_pop(fs, &fe) )
+ {
+ if( user_data_delete_function != NULL )
+ {
+ lfds610_freelist_get_user_data_from_element( fe, &user_data );
+ user_data_delete_function( user_data, user_state );
+ }
+
+ lfds610_liblfds_aligned_free( fe );
+ }
+
+ lfds610_liblfds_aligned_free( fs );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void *lfds610_freelist_get_user_data_from_element( struct lfds610_freelist_element *fe, void **user_data )
+{
+ assert( fe != NULL );
+ // TRD : user_data can be NULL
+
+ LFDS610_BARRIER_LOAD;
+
+ if( user_data != NULL )
+ *user_data = fe->user_data;
+
+ return( fe->user_data );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_freelist_set_user_data_in_element( struct lfds610_freelist_element *fe, void *user_data )
+{
+ assert( fe != NULL );
+ // TRD : user_data can be NULL
+
+ fe->user_data = user_data;
+
+ LFDS610_BARRIER_STORE;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+/***** defines *****/
+#define LFDS610_FREELIST_POINTER 0
+#define LFDS610_FREELIST_COUNTER 1
+#define LFDS610_FREELIST_PAC_SIZE 2
+
+/***** structures *****/
+#pragma pack( push, LFDS610_ALIGN_DOUBLE_POINTER )
+
+struct lfds610_freelist_state
+{
+ struct lfds610_freelist_element
+ *volatile top[LFDS610_FREELIST_PAC_SIZE];
+
+ int
+ (*user_data_init_function)( void **user_data, void *user_state );
+
+ void
+ *user_state;
+
+ lfds610_atom_t
+ aba_counter,
+ element_count;
+};
+
+struct lfds610_freelist_element
+{
+ struct lfds610_freelist_element
+ *next[LFDS610_FREELIST_PAC_SIZE];
+
+ void
+ *user_data;
+};
+
+#pragma pack( pop )
+
+/***** private prototypes *****/
+lfds610_atom_t lfds610_freelist_internal_new_element( struct lfds610_freelist_state *fs, struct lfds610_freelist_element **fe );
+void lfds610_freelist_internal_validate( struct lfds610_freelist_state *fs, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *lfds610_freelist_validity );
+
--- /dev/null
+#include "lfds610_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_freelist_new( struct lfds610_freelist_state **fs, lfds610_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )
+{
+ int
+ rv = 0;
+
+ lfds610_atom_t
+ element_count;
+
+ assert( fs != NULL );
+ // TRD : number_elements can be any value in its range
+ // TRD : user_data_init_function can be NULL
+
+ *fs = (struct lfds610_freelist_state *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_freelist_state), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( (*fs) != NULL )
+ {
+ (*fs)->top[LFDS610_FREELIST_POINTER] = NULL;
+ (*fs)->top[LFDS610_FREELIST_COUNTER] = 0;
+ (*fs)->user_data_init_function = user_data_init_function;
+ (*fs)->user_state = user_state;
+ (*fs)->aba_counter = 0;
+ (*fs)->element_count = 0;
+
+ element_count = lfds610_freelist_new_elements( *fs, number_elements );
+
+ if( element_count == number_elements )
+ rv = 1;
+
+ if( element_count != number_elements )
+ {
+ lfds610_liblfds_aligned_free( (*fs) );
+ *fs = NULL;
+ }
+ }
+
+ LFDS610_BARRIER_STORE;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_freelist_use( struct lfds610_freelist_state *fs )
+{
+ assert( fs != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+lfds610_atom_t lfds610_freelist_new_elements( struct lfds610_freelist_state *fs, lfds610_atom_t number_elements )
+{
+ struct lfds610_freelist_element
+ *fe;
+
+ lfds610_atom_t
+ loop,
+ count = 0;
+
+ assert( fs != NULL );
+ // TRD : number_elements can be any value in its range
+ // TRD : user_data_init_function can be NULL
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( lfds610_freelist_internal_new_element(fs, &fe) )
+ {
+ lfds610_freelist_push( fs, fe );
+ count++;
+ }
+
+ return( count );
+}
+
+
+
+
+
+/****************************************************************************/
+lfds610_atom_t lfds610_freelist_internal_new_element( struct lfds610_freelist_state *fs, struct lfds610_freelist_element **fe )
+{
+ lfds610_atom_t
+ rv = 0;
+
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ /* TRD : basically, does what you'd expect;
+
+ allocates an element
+ calls the user init function
+ if anything fails, cleans up,
+ sets *fe to NULL
+ and returns 0
+ */
+
+ *fe = (struct lfds610_freelist_element *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_freelist_element), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( *fe != NULL )
+ {
+ if( fs->user_data_init_function == NULL )
+ {
+ (*fe)->user_data = NULL;
+ rv = 1;
+ }
+
+ if( fs->user_data_init_function != NULL )
+ {
+ rv = fs->user_data_init_function( &(*fe)->user_data, fs->user_state );
+
+ if( rv == 0 )
+ {
+ lfds610_liblfds_aligned_free( *fe );
+ *fe = NULL;
+ }
+ }
+ }
+
+ if( rv == 1 )
+ lfds610_abstraction_increment( (lfds610_atom_t *) &fs->element_count );
+
+ return( rv );
+}
+
--- /dev/null
+#include "lfds610_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_freelist_element *lfds610_freelist_pop( struct lfds610_freelist_state *fs, struct lfds610_freelist_element **fe )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_freelist_element
+ *fe_local[LFDS610_FREELIST_PAC_SIZE];
+
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ fe_local[LFDS610_FREELIST_COUNTER] = fs->top[LFDS610_FREELIST_COUNTER];
+ fe_local[LFDS610_FREELIST_POINTER] = fs->top[LFDS610_FREELIST_POINTER];
+
+ /* TRD : note that lfds610_abstraction_dcas loads the original value of the destination (fs->top) into the compare (fe_local)
+ (this happens of course after the CAS itself has occurred inside lfds610_abstraction_dcas)
+ */
+
+ do
+ {
+ if( fe_local[LFDS610_FREELIST_POINTER] == NULL )
+ {
+ *fe = NULL;
+ return( *fe );
+ }
+ }
+ while( 0 == lfds610_abstraction_dcas((volatile lfds610_atom_t *) fs->top, (lfds610_atom_t *) fe_local[LFDS610_FREELIST_POINTER]->next, (lfds610_atom_t *) fe_local) );
+
+ *fe = (struct lfds610_freelist_element *) fe_local[LFDS610_FREELIST_POINTER];
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_freelist_element *lfds610_freelist_guaranteed_pop( struct lfds610_freelist_state *fs, struct lfds610_freelist_element **fe )
+{
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ lfds610_freelist_internal_new_element( fs, fe );
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_freelist_push( struct lfds610_freelist_state *fs, struct lfds610_freelist_element *fe )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_freelist_element
+ *fe_local[LFDS610_FREELIST_PAC_SIZE],
+ *original_fe_next[LFDS610_FREELIST_PAC_SIZE];
+
+ assert( fs != NULL );
+ assert( fe != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ fe_local[LFDS610_FREELIST_POINTER] = fe;
+ fe_local[LFDS610_FREELIST_COUNTER] = (struct lfds610_freelist_element *) lfds610_abstraction_increment( (lfds610_atom_t *) &fs->aba_counter );
+
+ original_fe_next[LFDS610_FREELIST_POINTER] = fs->top[LFDS610_FREELIST_POINTER];
+ original_fe_next[LFDS610_FREELIST_COUNTER] = fs->top[LFDS610_FREELIST_COUNTER];
+
+ /* TRD : note that lfds610_abstraction_dcas loads the original value of the destination (fs->top) into the compare (original_fe_next)
+ (this happens of course after the CAS itself has occurred inside lfds610_abstraction_dcas)
+ this then causes us in our loop, should we repeat it, to update fe_local->next to a more
+ up-to-date version of the head of the lfds610_freelist
+ */
+
+ do
+ {
+ fe_local[LFDS610_FREELIST_POINTER]->next[LFDS610_FREELIST_POINTER] = original_fe_next[LFDS610_FREELIST_POINTER];
+ fe_local[LFDS610_FREELIST_POINTER]->next[LFDS610_FREELIST_COUNTER] = original_fe_next[LFDS610_FREELIST_COUNTER];
+ }
+ while( 0 == lfds610_abstraction_dcas((volatile lfds610_atom_t *) fs->top, (lfds610_atom_t *) fe_local, (lfds610_atom_t *) original_fe_next) );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_freelist_query( struct lfds610_freelist_state *fs, enum lfds610_freelist_query_type query_type, void *query_input, void *query_output )
+{
+ assert( fs != NULL );
+ // TRD : query type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+ assert( query_output != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS610_FREELIST_QUERY_ELEMENT_COUNT:
+ assert( query_input == NULL );
+
+ *(lfds610_atom_t *) query_output = fs->element_count;
+ break;
+
+ case LFDS610_FREELIST_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ lfds610_freelist_internal_validate( fs, (struct lfds610_validation_info *) query_input, (enum lfds610_data_structure_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_freelist_internal_validate( struct lfds610_freelist_state *fs, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *lfds610_freelist_validity )
+{
+ struct lfds610_freelist_element
+ *fe,
+ *fe_slow,
+ *fe_fast;
+
+ lfds610_atom_t
+ element_count = 0;
+
+ assert( fs != NULL );
+ // TRD : vi can be NULL
+ assert( lfds610_freelist_validity != NULL );
+
+ *lfds610_freelist_validity = LFDS610_VALIDITY_VALID;
+
+ fe_slow = fe_fast = (struct lfds610_freelist_element *) fs->top[LFDS610_FREELIST_POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the lfds610_freelist
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the lfds610_freelist)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( fe_slow != NULL )
+ do
+ {
+ fe_slow = fe_slow->next[LFDS610_FREELIST_POINTER];
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next[LFDS610_FREELIST_POINTER];
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next[LFDS610_FREELIST_POINTER];
+ }
+ while( fe_slow != NULL and fe_fast != fe_slow );
+
+ if( fe_fast != NULL and fe_slow != NULL and fe_fast == fe_slow )
+ *lfds610_freelist_validity = LFDS610_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds610_freelist_validity == LFDS610_VALIDITY_VALID and vi != NULL )
+ {
+ fe = (struct lfds610_freelist_element *) fs->top[LFDS610_FREELIST_POINTER];
+
+ while( fe != NULL )
+ {
+ element_count++;
+ fe = (struct lfds610_freelist_element *) fe->next[LFDS610_FREELIST_POINTER];
+ }
+
+ if( element_count < vi->min_elements )
+ *lfds610_freelist_validity = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element_count > vi->max_elements )
+ *lfds610_freelist_validity = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_liblfds_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_liblfds_abstraction_test_helper_increment_non_atomic( lfds610_atom_t *shared_counter )
+{
+ /* TRD : lfds610_atom_t must be volatile or the compiler
+ optimizes it away into a single store
+ */
+
+ volatile lfds610_atom_t
+ count = 0;
+
+ assert( shared_counter != NULL );
+
+ while( count++ < 10000000 )
+ (*(lfds610_atom_t *) shared_counter)++;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_liblfds_abstraction_test_helper_increment_atomic( volatile lfds610_atom_t *shared_counter )
+{
+ lfds610_atom_t
+ count = 0;
+
+ assert( shared_counter != NULL );
+
+ while( count++ < 10000000 )
+ lfds610_abstraction_increment( shared_counter );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_liblfds_abstraction_test_helper_cas( volatile lfds610_atom_t *shared_counter, lfds610_atom_t *local_counter )
+{
+ lfds610_atom_t
+ loop = 0,
+ original_destination;
+
+ LFDS610_ALIGN(LFDS610_ALIGN_SINGLE_POINTER) lfds610_atom_t
+ exchange,
+ compare;
+
+ assert( shared_counter != NULL );
+ assert( local_counter != NULL );
+
+ while( loop++ < 1000000 )
+ {
+ do
+ {
+ compare = *shared_counter;
+ exchange = compare + 1;
+
+ original_destination = lfds610_abstraction_cas( shared_counter, exchange, compare );
+ }
+ while( original_destination != compare );
+
+ (*local_counter)++;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_liblfds_abstraction_test_helper_dcas( volatile lfds610_atom_t *shared_counter, lfds610_atom_t *local_counter )
+{
+ lfds610_atom_t
+ loop = 0;
+
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) lfds610_atom_t
+ exchange[2],
+ compare[2];
+
+ assert( shared_counter != NULL );
+ assert( local_counter != NULL );
+
+ while( loop++ < 1000000 )
+ {
+ compare[0] = *shared_counter;
+ compare[1] = *(shared_counter+1);
+
+ do
+ {
+ exchange[0] = compare[0] + 1;
+ exchange[1] = compare[1];
+ }
+ while( 0 == lfds610_abstraction_dcas(shared_counter, exchange, compare) );
+
+ (*local_counter)++;
+ }
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_liblfds_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_liblfds_aligned_free( void *memory )
+{
+ assert( memory != NULL );
+
+ // TRD : the "void *" stored above memory points to the root of the allocation
+ lfds610_abstraction_free( *( (void **) memory - 1 ) );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_liblfds_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void *lfds610_liblfds_aligned_malloc( size_t size, size_t align_in_bytes )
+{
+ void
+ *original_memory,
+ *memory;
+
+ size_t
+ offset;
+
+ // TRD : size can be any value in its range
+ // TRD : align_in_bytes can be any value in its range
+
+ original_memory = memory = lfds610_abstraction_malloc( size + sizeof(void *) + align_in_bytes );
+
+ if( memory != NULL )
+ {
+ // step past the pointer-sized slot which will hold the address of the original allocation
+ memory = (void **) memory + 1;
+ // advance to the next address which is a multiple of align_in_bytes
+ offset = align_in_bytes - (size_t) memory % align_in_bytes;
+ memory = (unsigned char *) memory + offset;
+ // store the original address immediately before the aligned block, for lfds610_liblfds_aligned_free()
+ *( (void **) memory - 1 ) = original_memory;
+ }
+
+ return( memory );
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+
--- /dev/null
+This is not a data structure but rather functions internal to the library.
+
--- /dev/null
+#include "lfds610_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_queue_delete( struct lfds610_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ void
+ *user_data;
+
+ assert( qs != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ // TRD : leading load barrier not required as it will be performed by the dequeue
+
+ while( lfds610_queue_dequeue(qs, &user_data) )
+ if( user_data_delete_function != NULL )
+ user_data_delete_function( user_data, user_state );
+
+ /* TRD : fully dequeuing will leave us
+ with a single dummy element
+ which both qs->enqueue and qs->dequeue point at
+ we push this back onto the lfds610_freelist
+ before we delete the lfds610_freelist
+ */
+
+ lfds610_freelist_push( qs->fs, qs->enqueue[LFDS610_QUEUE_POINTER]->fe );
+
+ lfds610_freelist_delete( qs->fs, lfds610_queue_internal_freelist_delete_function, NULL );
+
+ lfds610_liblfds_aligned_free( qs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_queue_internal_freelist_delete_function( void *user_data, void *user_state )
+{
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ lfds610_liblfds_aligned_free( user_data );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+/***** pragmas *****/
+
+/***** defines *****/
+#define LFDS610_QUEUE_STATE_UNKNOWN -1
+#define LFDS610_QUEUE_STATE_EMPTY 0
+#define LFDS610_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE 1
+#define LFDS610_QUEUE_STATE_ATTEMPT_DEQUEUE 2
+
+#define LFDS610_QUEUE_POINTER 0
+#define LFDS610_QUEUE_COUNTER 1
+#define LFDS610_QUEUE_PAC_SIZE 2
+
+/***** structures *****/
+#pragma pack( push, LFDS610_ALIGN_DOUBLE_POINTER )
+
+struct lfds610_queue_state
+{
+ struct lfds610_queue_element
+ *volatile enqueue[LFDS610_QUEUE_PAC_SIZE],
+ *volatile dequeue[LFDS610_QUEUE_PAC_SIZE];
+
+ lfds610_atom_t
+ aba_counter;
+
+ struct lfds610_freelist_state
+ *fs;
+};
+
+struct lfds610_queue_element
+{
+ // TRD : next in a lfds610_queue requires volatile as it is target of CAS
+ struct lfds610_queue_element
+ *volatile next[LFDS610_QUEUE_PAC_SIZE];
+
+ struct lfds610_freelist_element
+ *fe;
+
+ void
+ *user_data;
+};
+
+#pragma pack( pop )
+
+/***** externs *****/
+
+/***** private prototypes *****/
+int lfds610_queue_internal_freelist_init_function( void **user_data, void *user_state );
+void lfds610_queue_internal_freelist_delete_function( void *user_data, void *user_state );
+
+void lfds610_queue_internal_new_element_from_freelist( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE], void *user_data );
+void lfds610_queue_internal_guaranteed_new_element_from_freelist( struct lfds610_queue_state *qs, struct lfds610_queue_element * qe[LFDS610_QUEUE_PAC_SIZE], void *user_data );
+void lfds610_queue_internal_init_element( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE], struct lfds610_freelist_element *fe, void *user_data );
+
+void lfds610_queue_internal_queue( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE] );
+
+void lfds610_queue_internal_validate( struct lfds610_queue_state *qs, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *lfds610_queue_validity, enum lfds610_data_structure_validity *lfds610_freelist_validity );
+
--- /dev/null
+#include "lfds610_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_queue_new( struct lfds610_queue_state **qs, lfds610_atom_t number_elements )
+{
+ int
+ rv = 0;
+
+ struct lfds610_queue_element
+ *qe[LFDS610_QUEUE_PAC_SIZE];
+
+ assert( qs != NULL );
+ // TRD : number_elements can be any value in its range
+
+ *qs = (struct lfds610_queue_state *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_queue_state), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( *qs != NULL )
+ {
+ // TRD : the size of the lfds610_freelist is the size of the lfds610_queue (+1 for the leading dummy element, which is hidden from the caller)
+ lfds610_freelist_new( &(*qs)->fs, number_elements+1, lfds610_queue_internal_freelist_init_function, NULL );
+
+ if( (*qs)->fs != NULL )
+ {
+ lfds610_queue_internal_new_element_from_freelist( *qs, qe, NULL );
+ (*qs)->enqueue[LFDS610_QUEUE_POINTER] = (*qs)->dequeue[LFDS610_QUEUE_POINTER] = qe[LFDS610_QUEUE_POINTER];
+ (*qs)->enqueue[LFDS610_QUEUE_COUNTER] = (*qs)->dequeue[LFDS610_QUEUE_COUNTER] = 0;
+ (*qs)->aba_counter = 0;
+ rv = 1;
+ }
+
+ if( (*qs)->fs == NULL )
+ {
+ lfds610_liblfds_aligned_free( *qs );
+ *qs = NULL;
+ }
+ }
+
+ LFDS610_BARRIER_STORE;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_queue_use( struct lfds610_queue_state *qs )
+{
+ assert( qs != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int lfds610_queue_internal_freelist_init_function( void **user_data, void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ *user_data = lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_queue_element), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( *user_data != NULL )
+ rv = 1;
+
+ return( rv );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds610_queue_internal_new_element_from_freelist( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE], void *user_data )
+{
+ struct lfds610_freelist_element
+ *fe;
+
+ assert( qs != NULL );
+ assert( qe != NULL );
+ // TRD : user_data can be any value in its range
+
+ qe[LFDS610_QUEUE_POINTER] = NULL;
+
+ lfds610_freelist_pop( qs->fs, &fe );
+
+ if( fe != NULL )
+ lfds610_queue_internal_init_element( qs, qe, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_queue_internal_guaranteed_new_element_from_freelist( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE], void *user_data )
+{
+ struct lfds610_freelist_element
+ *fe;
+
+ assert( qs != NULL );
+ assert( qe != NULL );
+ // TRD : user_data can be any value in its range
+
+ qe[LFDS610_QUEUE_POINTER] = NULL;
+
+ lfds610_freelist_guaranteed_pop( qs->fs, &fe );
+
+ if( fe != NULL )
+ lfds610_queue_internal_init_element( qs, qe, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_queue_internal_init_element( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE], struct lfds610_freelist_element *fe, void *user_data )
+{
+ assert( qs != NULL );
+ assert( qe != NULL );
+ assert( fe != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds610_freelist_get_user_data_from_element( fe, (void **) &qe[LFDS610_QUEUE_POINTER] );
+ qe[LFDS610_QUEUE_COUNTER] = (struct lfds610_queue_element *) lfds610_abstraction_increment( (lfds610_atom_t *) &qs->aba_counter );
+
+ qe[LFDS610_QUEUE_POINTER]->next[LFDS610_QUEUE_POINTER] = NULL;
+ qe[LFDS610_QUEUE_POINTER]->next[LFDS610_QUEUE_COUNTER] = (struct lfds610_queue_element *) lfds610_abstraction_increment( (lfds610_atom_t *) &qs->aba_counter );
+
+ qe[LFDS610_QUEUE_POINTER]->fe = fe;
+ qe[LFDS610_QUEUE_POINTER]->user_data = user_data;
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_queue_query( struct lfds610_queue_state *qs, enum lfds610_queue_query_type query_type, void *query_input, void *query_output )
+{
+ assert( qs != NULL );
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );
+
+ switch( query_type )
+ {
+ case LFDS610_QUEUE_QUERY_ELEMENT_COUNT:
+ assert( query_input == NULL );
+
+ lfds610_freelist_query( qs->fs, LFDS610_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );
+ break;
+
+ case LFDS610_QUEUE_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ lfds610_queue_internal_validate( qs, (struct lfds610_validation_info *) query_input, (enum lfds610_data_structure_validity *) query_output, ((enum lfds610_data_structure_validity *) query_output)+1 );
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds610_queue_internal_validate( struct lfds610_queue_state *qs, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *lfds610_queue_validity, enum lfds610_data_structure_validity *lfds610_freelist_validity )
+{
+ struct lfds610_queue_element
+ *qe,
+ *qe_slow,
+ *qe_fast;
+
+ lfds610_atom_t
+ element_count = 0,
+ total_elements;
+
+ struct lfds610_validation_info
+ lfds610_freelist_vi;
+
+ assert( qs != NULL );
+ // TRD : vi can be NULL
+ assert( lfds610_queue_validity != NULL );
+ assert( lfds610_freelist_validity != NULL );
+
+ *lfds610_queue_validity = LFDS610_VALIDITY_VALID;
+
+ LFDS610_BARRIER_LOAD;
+
+ qe_slow = qe_fast = (struct lfds610_queue_element *) qs->dequeue[LFDS610_QUEUE_POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the dequeue end of the lfds610_queue
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the lfds610_queue)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( qe_slow != NULL )
+ do
+ {
+ qe_slow = qe_slow->next[LFDS610_QUEUE_POINTER];
+
+ if( qe_fast != NULL )
+ qe_fast = qe_fast->next[LFDS610_QUEUE_POINTER];
+
+ if( qe_fast != NULL )
+ qe_fast = qe_fast->next[LFDS610_QUEUE_POINTER];
+ }
+ while( qe_slow != NULL and qe_fast != qe_slow );
+
+ if( qe_fast != NULL and qe_slow != NULL and qe_fast == qe_slow )
+ *lfds610_queue_validity = LFDS610_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds610_queue_validity == LFDS610_VALIDITY_VALID and vi != NULL )
+ {
+ qe = (struct lfds610_queue_element *) qs->dequeue[LFDS610_QUEUE_POINTER];
+
+ while( qe != NULL )
+ {
+ element_count++;
+ qe = (struct lfds610_queue_element *) qe->next[LFDS610_QUEUE_POINTER];
+ }
+
+ /* TRD : remember there is a dummy element in the lfds610_queue */
+ element_count--;
+
+ if( element_count < vi->min_elements )
+ *lfds610_queue_validity = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element_count > vi->max_elements )
+ *lfds610_queue_validity = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ /* TRD : now we validate the lfds610_freelist
+
+ we may be able to check for the expected number of
+ elements in the lfds610_freelist
+
+ if the caller has given us an expected min and max
+ number of elements in the lfds610_queue, then the total number
+ of elements in the lfds610_freelist, minus that min and max,
+ gives us the expected number of elements in the
+ lfds610_freelist
+ */
+
+ if( vi != NULL )
+ {
+ lfds610_freelist_query( qs->fs, LFDS610_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );
+
+ /* TRD : remember there is a dummy element in the lfds610_queue */
+ total_elements--;
+
+ lfds610_freelist_vi.min_elements = total_elements - vi->max_elements;
+ lfds610_freelist_vi.max_elements = total_elements - vi->min_elements;
+
+ lfds610_freelist_query( qs->fs, LFDS610_FREELIST_QUERY_VALIDATE, (void *) &lfds610_freelist_vi, (void *) lfds610_freelist_validity );
+ }
+
+ if( vi == NULL )
+ lfds610_freelist_query( qs->fs, LFDS610_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds610_freelist_validity );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_queue_enqueue( struct lfds610_queue_state *qs, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_queue_element
+ *qe[LFDS610_QUEUE_PAC_SIZE];
+
+ assert( qs != NULL );
+ // TRD : user_data can be NULL
+
+ lfds610_queue_internal_new_element_from_freelist( qs, qe, user_data );
+
+ if( qe[LFDS610_QUEUE_POINTER] == NULL )
+ return( 0 );
+
+ lfds610_queue_internal_queue( qs, qe );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds610_queue_guaranteed_enqueue( struct lfds610_queue_state *qs, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_queue_element
+ *qe[LFDS610_QUEUE_PAC_SIZE];
+
+ assert( qs != NULL );
+ // TRD : user_data can be NULL
+
+ lfds610_queue_internal_guaranteed_new_element_from_freelist( qs, qe, user_data );
+
+ if( qe[LFDS610_QUEUE_POINTER] == NULL )
+ return( 0 );
+
+ lfds610_queue_internal_queue( qs, qe );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_queue_internal_queue( struct lfds610_queue_state *qs, struct lfds610_queue_element *qe[LFDS610_QUEUE_PAC_SIZE] )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_queue_element
+ *enqueue[LFDS610_QUEUE_PAC_SIZE],
+ *next[LFDS610_QUEUE_PAC_SIZE];
+
+ unsigned char
+ cas_result = 0;
+
+ assert( qs != NULL );
+ assert( qe != NULL );
+
+ // TRD : the DCAS operation issues a read and write barrier, so we don't need a read barrier in the do() loop
+
+ LFDS610_BARRIER_LOAD;
+
+ do
+ {
+ enqueue[LFDS610_QUEUE_POINTER] = qs->enqueue[LFDS610_QUEUE_POINTER];
+ enqueue[LFDS610_QUEUE_COUNTER] = qs->enqueue[LFDS610_QUEUE_COUNTER];
+
+ next[LFDS610_QUEUE_POINTER] = enqueue[LFDS610_QUEUE_POINTER]->next[LFDS610_QUEUE_POINTER];
+ next[LFDS610_QUEUE_COUNTER] = enqueue[LFDS610_QUEUE_POINTER]->next[LFDS610_QUEUE_COUNTER];
+
+ /* TRD : this if() ensures that the next we read, just above,
+ really is from qs->enqueue (which we copied into enqueue)
+ */
+
+ LFDS610_BARRIER_LOAD;
+
+ if( qs->enqueue[LFDS610_QUEUE_POINTER] == enqueue[LFDS610_QUEUE_POINTER] and qs->enqueue[LFDS610_QUEUE_COUNTER] == enqueue[LFDS610_QUEUE_COUNTER] )
+ {
+ if( next[LFDS610_QUEUE_POINTER] == NULL )
+ {
+ qe[LFDS610_QUEUE_COUNTER] = next[LFDS610_QUEUE_COUNTER] + 1;
+ cas_result = lfds610_abstraction_dcas( (volatile lfds610_atom_t *) enqueue[LFDS610_QUEUE_POINTER]->next, (lfds610_atom_t *) qe, (lfds610_atom_t *) next );
+ }
+ else
+ {
+ next[LFDS610_QUEUE_COUNTER] = enqueue[LFDS610_QUEUE_COUNTER] + 1;
+ lfds610_abstraction_dcas( (volatile lfds610_atom_t *) qs->enqueue, (lfds610_atom_t *) next, (lfds610_atom_t *) enqueue );
+ }
+ }
+ }
+ while( cas_result == 0 );
+
+ qe[LFDS610_QUEUE_COUNTER] = enqueue[LFDS610_QUEUE_COUNTER] + 1;
+ lfds610_abstraction_dcas( (volatile lfds610_atom_t *) qs->enqueue, (lfds610_atom_t *) qe, (lfds610_atom_t *) enqueue );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds610_queue_dequeue( struct lfds610_queue_state *qs, void **user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_queue_element
+ *enqueue[LFDS610_QUEUE_PAC_SIZE],
+ *dequeue[LFDS610_QUEUE_PAC_SIZE],
+ *next[LFDS610_QUEUE_PAC_SIZE];
+
+ unsigned char
+ cas_result = 0;
+
+ int
+ rv = 1,
+ state = LFDS610_QUEUE_STATE_UNKNOWN,
+ finished_flag = LOWERED;
+
+ assert( qs != NULL );
+ assert( user_data != NULL );
+
+ // TRD : the DCAS operation issues a read and write barrier, so we don't need a read barrier in the do() loop
+
+ LFDS610_BARRIER_LOAD;
+
+ do
+ {
+ dequeue[LFDS610_QUEUE_POINTER] = qs->dequeue[LFDS610_QUEUE_POINTER];
+ dequeue[LFDS610_QUEUE_COUNTER] = qs->dequeue[LFDS610_QUEUE_COUNTER];
+
+ enqueue[LFDS610_QUEUE_POINTER] = qs->enqueue[LFDS610_QUEUE_POINTER];
+ enqueue[LFDS610_QUEUE_COUNTER] = qs->enqueue[LFDS610_QUEUE_COUNTER];
+
+ next[LFDS610_QUEUE_POINTER] = dequeue[LFDS610_QUEUE_POINTER]->next[LFDS610_QUEUE_POINTER];
+ next[LFDS610_QUEUE_COUNTER] = dequeue[LFDS610_QUEUE_POINTER]->next[LFDS610_QUEUE_COUNTER];
+
+ /* TRD : confirm that dequeue didn't move between reading it
+ and reading its next pointer
+ */
+
+ LFDS610_BARRIER_LOAD;
+
+ if( dequeue[LFDS610_QUEUE_POINTER] == qs->dequeue[LFDS610_QUEUE_POINTER] and dequeue[LFDS610_QUEUE_COUNTER] == qs->dequeue[LFDS610_QUEUE_COUNTER] )
+ {
+ if( enqueue[LFDS610_QUEUE_POINTER] == dequeue[LFDS610_QUEUE_POINTER] and next[LFDS610_QUEUE_POINTER] == NULL )
+ state = LFDS610_QUEUE_STATE_EMPTY;
+
+ if( enqueue[LFDS610_QUEUE_POINTER] == dequeue[LFDS610_QUEUE_POINTER] and next[LFDS610_QUEUE_POINTER] != NULL )
+ state = LFDS610_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE;
+
+ if( enqueue[LFDS610_QUEUE_POINTER] != dequeue[LFDS610_QUEUE_POINTER] )
+ state = LFDS610_QUEUE_STATE_ATTEMPT_DEQUEUE;
+
+ switch( state )
+ {
+ case LFDS610_QUEUE_STATE_EMPTY:
+ *user_data = NULL;
+ rv = 0;
+ finished_flag = RAISED;
+ break;
+
+ case LFDS610_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE:
+ next[LFDS610_QUEUE_COUNTER] = enqueue[LFDS610_QUEUE_COUNTER] + 1;
+ lfds610_abstraction_dcas( (volatile lfds610_atom_t *) qs->enqueue, (lfds610_atom_t *) next, (lfds610_atom_t *) enqueue );
+ break;
+
+ case LFDS610_QUEUE_STATE_ATTEMPT_DEQUEUE:
+ *user_data = next[LFDS610_QUEUE_POINTER]->user_data;
+
+ next[LFDS610_QUEUE_COUNTER] = dequeue[LFDS610_QUEUE_COUNTER] + 1;
+ cas_result = lfds610_abstraction_dcas( (volatile lfds610_atom_t *) qs->dequeue, (lfds610_atom_t *) next, (lfds610_atom_t *) dequeue );
+
+ if( cas_result == 1 )
+ finished_flag = RAISED;
+ break;
+ }
+ }
+ }
+ while( finished_flag == LOWERED );
+
+ if( cas_result == 1 )
+ lfds610_freelist_push( qs->fs, dequeue[LFDS610_QUEUE_POINTER]->fe );
+
+ return( rv );
+}
+
--- /dev/null
+#include "lfds610_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_ringbuffer_delete( struct lfds610_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ assert( rs != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ lfds610_queue_delete( rs->qs, NULL, NULL );
+
+ lfds610_freelist_delete( rs->fs, user_data_delete_function, user_state );
+
+ lfds610_liblfds_aligned_free( rs );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_freelist_element *lfds610_ringbuffer_get_read_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element **fe )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+
+ lfds610_queue_dequeue( rs->qs, (void **) fe );
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_freelist_element *lfds610_ringbuffer_get_write_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element **fe, int *overwrite_flag )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+ // TRD : overwrite_flag can be NULL
+
+ /* TRD : we try to obtain an element from the lfds610_freelist
+ if we can, we populate it and add it to the lfds610_queue
+
+ if we cannot, then the lfds610_ringbuffer is full,
+ so we grab the current read element and
+ use that instead
+
+ dequeue may fail since the lfds610_queue may be emptied
+ during our dequeue attempt
+
+ so what we actually do here is a loop, attempting
+ the lfds610_freelist and if it fails then a dequeue, until
+ we obtain an element
+
+ once we have an element, we queue it
+
+ you may be wondering why this operation is in a loop
+ remember - these operations are lock-free; anything
+ can happen in between
+
+ so for example the pop could fail because the lfds610_freelist
+ is empty; but by the time we go to get an element from
+ the lfds610_queue, the whole lfds610_queue has been emptied back into
+ the lfds610_freelist!
+
+ if overwrite_flag is provided, we set it to 0 if we
+ obtained a new element from the lfds610_freelist, 1 if we
+ stole an element from the lfds610_queue
+ */
+
+ do
+ {
+ if( overwrite_flag != NULL )
+ *overwrite_flag = 0;
+
+ lfds610_freelist_pop( rs->fs, fe );
+
+ if( *fe == NULL )
+ {
+ lfds610_ringbuffer_get_read_element( rs, fe );
+
+ if( overwrite_flag != NULL and *fe != NULL )
+ *overwrite_flag = 1;
+ }
+ }
+ while( *fe == NULL );
+
+ return( *fe );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_ringbuffer_put_read_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element *fe )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+
+ lfds610_freelist_push( rs->fs, fe );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_ringbuffer_put_write_element( struct lfds610_ringbuffer_state *rs, struct lfds610_freelist_element *fe )
+{
+ assert( rs != NULL );
+ assert( fe != NULL );
+
+ lfds610_queue_enqueue( rs->qs, fe );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+/***** defines *****/
+
+/***** structures *****/
+#pragma pack( push, LFDS610_ALIGN_DOUBLE_POINTER )
+
+struct lfds610_ringbuffer_state
+{
+ struct lfds610_queue_state
+ *qs;
+
+ struct lfds610_freelist_state
+ *fs;
+};
+
+#pragma pack( pop )
+
+/***** externs *****/
+
+/***** private prototypes *****/
+void lfds610_ringbuffer_internal_validate( struct lfds610_ringbuffer_state *rs, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *lfds610_queue_validity, enum lfds610_data_structure_validity *lfds610_freelist_validity );
+
--- /dev/null
+#include "lfds610_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_ringbuffer_new( struct lfds610_ringbuffer_state **rs, lfds610_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( rs != NULL );
+ // TRD : number_elements can be any value in its range
+ // TRD : user_data_init_function can be NULL
+ // TRD : user_state can be NULL
+
+ *rs = (struct lfds610_ringbuffer_state *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_ringbuffer_state), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( *rs != NULL )
+ {
+ lfds610_freelist_new( &(*rs)->fs, number_elements, user_data_init_function, user_state );
+
+ if( (*rs)->fs != NULL )
+ {
+ lfds610_queue_new( &(*rs)->qs, number_elements );
+
+ if( (*rs)->qs != NULL )
+ rv = 1;
+
+ if( (*rs)->qs == NULL )
+ {
+ lfds610_liblfds_aligned_free( *rs );
+ *rs = NULL;
+ }
+ }
+
+ if( (*rs)->fs == NULL )
+ {
+ lfds610_liblfds_aligned_free( *rs );
+ *rs = NULL;
+ }
+ }
+
+ LFDS610_BARRIER_STORE;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_ringbuffer_use( struct lfds610_ringbuffer_state *rs )
+{
+ assert( rs != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+#include "lfds610_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_ringbuffer_query( struct lfds610_ringbuffer_state *rs, enum lfds610_ringbuffer_query_type query_type, void *query_input, void *query_output )
+{
+ assert( rs != NULL );
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );
+
+ switch( query_type )
+ {
+ case LFDS610_RINGBUFFER_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ lfds610_ringbuffer_internal_validate( rs, (struct lfds610_validation_info *) query_input, (enum lfds610_data_structure_validity *) query_output, ((enum lfds610_data_structure_validity *) query_output)+2 );
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds610_ringbuffer_internal_validate( struct lfds610_ringbuffer_state *rs, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *lfds610_queue_validity, enum lfds610_data_structure_validity *lfds610_freelist_validity )
+{
+ assert( rs != NULL );
+ // TRD : vi can be NULL
+ assert( lfds610_queue_validity != NULL );
+ assert( lfds610_freelist_validity != NULL );
+
+ lfds610_queue_query( rs->qs, LFDS610_QUEUE_QUERY_VALIDATE, vi, lfds610_queue_validity );
+
+ if( vi != NULL )
+ {
+ struct lfds610_validation_info
+ lfds610_freelist_vi;
+
+ lfds610_atom_t
+ total_elements;
+
+ lfds610_freelist_query( rs->fs, LFDS610_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );
+ lfds610_freelist_vi.min_elements = total_elements - vi->max_elements;
+ lfds610_freelist_vi.max_elements = total_elements - vi->min_elements;
+ lfds610_freelist_query( rs->fs, LFDS610_FREELIST_QUERY_VALIDATE, (void *) &lfds610_freelist_vi, (void *) lfds610_freelist_validity );
+ }
+
+ if( vi == NULL )
+ lfds610_freelist_query( rs->fs, LFDS610_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds610_freelist_validity );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_slist_delete( struct lfds610_slist_state *ss )
+{
+ lfds610_slist_single_threaded_physically_delete_all_elements( ss );
+
+ lfds610_liblfds_aligned_free( ss );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds610_slist_logically_delete_element( struct lfds610_slist_state *ss, struct lfds610_slist_element *se )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) void
+ *volatile user_data_and_flags[2],
+ *volatile new_user_data_and_flags[2];
+
+ unsigned char
+ cas_rv = 0;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ user_data_and_flags[LFDS610_SLIST_USER_DATA] = se->user_data_and_flags[LFDS610_SLIST_USER_DATA];
+ user_data_and_flags[LFDS610_SLIST_FLAGS] = se->user_data_and_flags[LFDS610_SLIST_FLAGS];
+
+ do
+ {
+ new_user_data_and_flags[LFDS610_SLIST_USER_DATA] = user_data_and_flags[LFDS610_SLIST_USER_DATA];
+ new_user_data_and_flags[LFDS610_SLIST_FLAGS] = (void *) ((lfds610_atom_t) user_data_and_flags[LFDS610_SLIST_FLAGS] | LFDS610_SLIST_FLAG_DELETED);
+ }
+ while( !((lfds610_atom_t) user_data_and_flags[LFDS610_SLIST_FLAGS] & LFDS610_SLIST_FLAG_DELETED) and 0 == (cas_rv = lfds610_abstraction_dcas((volatile lfds610_atom_t *) se->user_data_and_flags, (lfds610_atom_t *) new_user_data_and_flags, (lfds610_atom_t *) user_data_and_flags)) );
+
+ if( cas_rv == 1 )
+ if( ss->user_data_delete_function != NULL )
+ ss->user_data_delete_function( (void *) user_data_and_flags[LFDS610_SLIST_USER_DATA], ss->user_state );
+
+ return( cas_rv );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_slist_single_threaded_physically_delete_all_elements( struct lfds610_slist_state *ss )
+{
+ struct lfds610_slist_element
+ *volatile se,
+ *volatile se_temp;
+
+ LFDS610_BARRIER_LOAD;
+
+ se = ss->head;
+
+ while( se != NULL )
+ {
+ // TRD : if a non-deleted element and there is a delete function, call the delete function
+ if( ss->user_data_delete_function != NULL and !((lfds610_atom_t) se->user_data_and_flags[LFDS610_SLIST_FLAGS] & LFDS610_SLIST_FLAG_DELETED) )
+ ss->user_data_delete_function( (void *) se->user_data_and_flags[LFDS610_SLIST_USER_DATA], ss->user_state );
+
+ se_temp = se;
+ se = se->next;
+ lfds610_liblfds_aligned_free( (void *) se_temp );
+ }
+
+ lfds610_slist_internal_init_slist( ss, ss->user_data_delete_function, ss->user_state );
+
+ LFDS610_BARRIER_STORE;
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_slist_get_user_data_from_element( struct lfds610_slist_element *se, void **user_data )
+{
+ int
+ rv = 1;
+
+ assert( se != NULL );
+ assert( user_data != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ *user_data = (void *) se->user_data_and_flags[LFDS610_SLIST_USER_DATA];
+
+ if( (lfds610_atom_t) se->user_data_and_flags[LFDS610_SLIST_FLAGS] & LFDS610_SLIST_FLAG_DELETED )
+ rv = 0;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds610_slist_set_user_data_in_element( struct lfds610_slist_element *se, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) void
+ *user_data_and_flags[2],
+ *new_user_data_and_flags[2];
+
+ int
+ rv = 1;
+
+ assert( se != NULL );
+ // TRD : user_data can be NULL
+
+ LFDS610_BARRIER_LOAD;
+
+ user_data_and_flags[LFDS610_SLIST_USER_DATA] = se->user_data_and_flags[LFDS610_SLIST_USER_DATA];
+ user_data_and_flags[LFDS610_SLIST_FLAGS] = se->user_data_and_flags[LFDS610_SLIST_FLAGS];
+
+ new_user_data_and_flags[LFDS610_SLIST_USER_DATA] = user_data;
+
+ do
+ {
+ new_user_data_and_flags[LFDS610_SLIST_FLAGS] = user_data_and_flags[LFDS610_SLIST_FLAGS];
+ }
+ while( !((lfds610_atom_t) user_data_and_flags[LFDS610_SLIST_FLAGS] & LFDS610_SLIST_FLAG_DELETED) and
+ 0 == lfds610_abstraction_dcas((volatile lfds610_atom_t *) se->user_data_and_flags, (lfds610_atom_t *) new_user_data_and_flags, (lfds610_atom_t *) user_data_and_flags) );
+
+ if( (lfds610_atom_t) user_data_and_flags[LFDS610_SLIST_FLAGS] & LFDS610_SLIST_FLAG_DELETED )
+ rv = 0;
+
+ LFDS610_BARRIER_STORE;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_slist_element *lfds610_slist_get_head( struct lfds610_slist_state *ss, struct lfds610_slist_element **se )
+{
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ *se = (struct lfds610_slist_element *) ss->head;
+
+ lfds610_slist_internal_move_to_first_undeleted_element( se );
+
+ return( *se );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_slist_element *lfds610_slist_get_next( struct lfds610_slist_element *se, struct lfds610_slist_element **next_se )
+{
+ assert( se != NULL );
+ assert( next_se != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ *next_se = (struct lfds610_slist_element *) se->next;
+
+ lfds610_slist_internal_move_to_first_undeleted_element( next_se );
+
+ return( *next_se );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_slist_element *lfds610_slist_get_head_and_then_next( struct lfds610_slist_state *ss, struct lfds610_slist_element **se )
+{
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ if( *se == NULL )
+ lfds610_slist_get_head( ss, se );
+ else
+ lfds610_slist_get_next( *se, se );
+
+ return( *se );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_slist_internal_move_to_first_undeleted_element( struct lfds610_slist_element **se )
+{
+ assert( se != NULL );
+
+ while( *se != NULL and (lfds610_atom_t) (*se)->user_data_and_flags[LFDS610_SLIST_FLAGS] & LFDS610_SLIST_FLAG_DELETED )
+ (*se) = (struct lfds610_slist_element *) (*se)->next;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+/***** defines *****/
+#define LFDS610_SLIST_USER_DATA 0
+#define LFDS610_SLIST_FLAGS 1
+
+#define LFDS610_SLIST_NO_FLAGS 0x0
+#define LFDS610_SLIST_FLAG_DELETED 0x1
+
+/***** structures *****/
+#pragma pack( push, LFDS610_ALIGN_SINGLE_POINTER )
+
+struct lfds610_slist_state
+{
+ struct lfds610_slist_element
+ *volatile head;
+
+ void
+ (*user_data_delete_function)( void *user_data, void *user_state ),
+ *user_state;
+};
+
+#pragma pack( pop )
+
+#pragma pack( push, LFDS610_ALIGN_DOUBLE_POINTER )
+
+/* TRD : this pragma pack doesn't seem to work under Windows
+ if the structure members are the correct way round
+ (next first), then user_data_and_flags ends up on
+ a single pointer boundary and DCAS crashes
+
+ accordingly, I've moved user_data_and_flags first
+*/
+
+struct lfds610_slist_element
+{
+ void
+ *volatile user_data_and_flags[2];
+
+ // TRD : requires volatile as is target of CAS
+ struct lfds610_slist_element
+ *volatile next;
+};
+
+#pragma pack( pop )
+
+/***** private prototypes *****/
+void lfds610_slist_internal_init_slist( struct lfds610_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );
+
+void lfds610_slist_internal_link_element_to_head( struct lfds610_slist_state *lfds610_slist_state, struct lfds610_slist_element *volatile se );
+void lfds610_slist_internal_link_element_after_element( struct lfds610_slist_element *volatile lfds610_slist_in_list_element, struct lfds610_slist_element *volatile se );
+
+void lfds610_slist_internal_move_to_first_undeleted_element( struct lfds610_slist_element **se );
+
--- /dev/null
+#include "lfds610_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_slist_internal_link_element_to_head( struct lfds610_slist_state *ss, struct lfds610_slist_element *volatile se )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_SINGLE_POINTER) struct lfds610_slist_element
+ *se_next;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ se_next = ss->head;
+
+ do
+ {
+ se->next = se_next;
+ }
+ while( se->next != (se_next = (struct lfds610_slist_element *) lfds610_abstraction_cas((volatile lfds610_atom_t *) &ss->head, (lfds610_atom_t) se, (lfds610_atom_t) se->next)) );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_slist_internal_link_element_after_element( struct lfds610_slist_element *volatile lfds610_slist_in_list_element, struct lfds610_slist_element *volatile se )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_SINGLE_POINTER) struct lfds610_slist_element
+ *se_prev,
+ *se_next;
+
+ assert( lfds610_slist_in_list_element != NULL );
+ assert( se != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ se_prev = (struct lfds610_slist_element *) lfds610_slist_in_list_element;
+
+ se_next = se_prev->next;
+
+ do
+ {
+ se->next = se_next;
+ }
+ while( se->next != (se_next = (struct lfds610_slist_element *) lfds610_abstraction_cas((volatile lfds610_atom_t *) &se_prev->next, (lfds610_atom_t) se, (lfds610_atom_t) se->next)) );
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_slist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_slist_new( struct lfds610_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( ss != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ *ss = (struct lfds610_slist_state *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_slist_state), LFDS610_ALIGN_SINGLE_POINTER );
+
+ if( *ss != NULL )
+ {
+ lfds610_slist_internal_init_slist( *ss, user_data_delete_function, user_state );
+ rv = 1;
+ }
+
+ LFDS610_BARRIER_STORE;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_slist_use( struct lfds610_slist_state *ss )
+{
+ assert( ss != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds610_slist_internal_init_slist( struct lfds610_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ assert( ss != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ ss->head = NULL;
+ ss->user_data_delete_function = user_data_delete_function;
+ ss->user_state = user_state;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_slist_element *lfds610_slist_new_head( struct lfds610_slist_state *ss, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_SINGLE_POINTER) struct lfds610_slist_element
+ *volatile se;
+
+ assert( ss != NULL );
+ // TRD : user_data can be NULL
+
+ se = (struct lfds610_slist_element *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_slist_element), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( se != NULL )
+ {
+ se->user_data_and_flags[LFDS610_SLIST_USER_DATA] = user_data;
+ se->user_data_and_flags[LFDS610_SLIST_FLAGS] = LFDS610_SLIST_NO_FLAGS;
+
+ lfds610_slist_internal_link_element_to_head( ss, se );
+ }
+
+ return( (struct lfds610_slist_element *) se );
+}
+
+
+
+
+
+/****************************************************************************/
+struct lfds610_slist_element *lfds610_slist_new_next( struct lfds610_slist_element *se, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_SINGLE_POINTER) struct lfds610_slist_element
+ *volatile se_next;
+
+ assert( se != NULL );
+ // TRD : user_data can be NULL
+
+ se_next = (struct lfds610_slist_element *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_slist_element), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( se_next != NULL )
+ {
+ se_next->user_data_and_flags[LFDS610_SLIST_USER_DATA] = user_data;
+ se_next->user_data_and_flags[LFDS610_SLIST_FLAGS] = LFDS610_SLIST_NO_FLAGS;
+
+ lfds610_slist_internal_link_element_after_element( se, se_next );
+ }
+
+ return( (struct lfds610_slist_element *) se_next );
+}
+
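+
+
+/* A minimal, illustrative usage sketch (not part of the library source);
+ it uses only the public slist functions defined in this directory and
+ declared in liblfds610.h, and omits all error handling :
+
+ struct lfds610_slist_state *ss;
+ struct lfds610_slist_element *se = NULL;
+ void *user_data;
+
+ lfds610_slist_new( &ss, NULL, NULL );
+
+ lfds610_slist_new_head( ss, (void *) 0x01 );
+ lfds610_slist_new_head( ss, (void *) 0x02 );
+
+ while( lfds610_slist_get_head_and_then_next(ss, &se) != NULL )
+ lfds610_slist_get_user_data_from_element( se, &user_data );
+
+ lfds610_slist_delete( ss );
+*/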
--- /dev/null
+#include "lfds610_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_delete( struct lfds610_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )
+{
+ void
+ *user_data;
+
+ assert( ss != NULL );
+ // TRD : user_data_delete_function can be NULL
+ // TRD : user_state can be NULL
+
+ while( lfds610_stack_pop(ss, &user_data) )
+ if( user_data_delete_function != NULL )
+ user_data_delete_function( user_data, user_state );
+
+ lfds610_freelist_delete( ss->fs, lfds610_stack_internal_freelist_delete_function, NULL );
+
+ lfds610_liblfds_aligned_free( ss );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_clear( struct lfds610_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state )
+{
+ void
+ *user_data;
+
+ assert( ss != NULL );
+ // TRD : user_data_clear_function can be NULL
+ // TRD : user_state can be NULL
+
+ while( lfds610_stack_pop(ss, &user_data) )
+ if( user_data_clear_function != NULL )
+ user_data_clear_function( user_data, user_state );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_stack_internal_freelist_delete_function( void *user_data, void *user_state )
+{
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ lfds610_liblfds_aligned_free( user_data );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "liblfds610_internal.h"
+
+/***** pragmas *****/
+
+/***** defines *****/
+#define LFDS610_STACK_POINTER 0
+#define LFDS610_STACK_COUNTER 1
+#define LFDS610_STACK_PAC_SIZE 2
+
+/***** structures *****/
+#pragma pack( push, LFDS610_ALIGN_DOUBLE_POINTER )
+
+struct lfds610_stack_state
+{
+ // TRD : must come first for alignment
+ struct lfds610_stack_element
+ *volatile top[LFDS610_STACK_PAC_SIZE];
+
+ lfds610_atom_t
+ aba_counter;
+
+ struct lfds610_freelist_state
+ *fs;
+};
+
+struct lfds610_stack_element
+{
+ struct lfds610_stack_element
+ *next[LFDS610_STACK_PAC_SIZE];
+
+ struct lfds610_freelist_element
+ *fe;
+
+ void
+ *user_data;
+};
+
+#pragma pack( pop )
+
+/***** private prototypes *****/
+int lfds610_stack_internal_freelist_init_function( void **user_data, void *user_state );
+void lfds610_stack_internal_freelist_delete_function( void *user_data, void *user_state );
+
+void lfds610_stack_internal_new_element_from_freelist( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE], void *user_data );
+void lfds610_stack_internal_new_element( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE], void *user_data );
+void lfds610_stack_internal_init_element( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE], struct lfds610_freelist_element *fe, void *user_data );
+
+void lfds610_stack_internal_push( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE] );
+
+void lfds610_stack_internal_validate( struct lfds610_stack_state *ss, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *stack_validity, enum lfds610_data_structure_validity *freelist_validity );
+
--- /dev/null
+#include "lfds610_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_stack_new( struct lfds610_stack_state **ss, lfds610_atom_t number_elements )
+{
+ int
+ rv = 0;
+
+ assert( ss != NULL );
+ // TRD : number_elements can be any value in its range
+
+ *ss = (struct lfds610_stack_state *) lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_stack_state), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( *ss != NULL )
+ {
+ // TRD : the size of the lfds610_freelist is the size of the lfds610_stack
+ lfds610_freelist_new( &(*ss)->fs, number_elements, lfds610_stack_internal_freelist_init_function, NULL );
+
+ if( (*ss)->fs != NULL )
+ {
+ (*ss)->top[LFDS610_STACK_POINTER] = NULL;
+ (*ss)->top[LFDS610_STACK_COUNTER] = 0;
+ (*ss)->aba_counter = 0;
+ rv = 1;
+ }
+
+ if( (*ss)->fs == NULL )
+ {
+ lfds610_liblfds_aligned_free( *ss );
+ *ss = NULL;
+ }
+ }
+
+ LFDS610_BARRIER_STORE;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds610_stack_use( struct lfds610_stack_state *ss )
+{
+ assert( ss != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int lfds610_stack_internal_freelist_init_function( void **user_data, void *user_state )
+{
+ int
+ rv = 0;
+
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ *user_data = lfds610_liblfds_aligned_malloc( sizeof(struct lfds610_stack_element), LFDS610_ALIGN_DOUBLE_POINTER );
+
+ if( *user_data != NULL )
+ rv = 1;
+
+ return( rv );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_internal_new_element_from_freelist( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE], void *user_data )
+{
+ struct lfds610_freelist_element
+ *fe;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds610_freelist_pop( ss->fs, &fe );
+
+ if( fe == NULL )
+ se[LFDS610_STACK_POINTER] = NULL;
+
+ if( fe != NULL )
+ lfds610_stack_internal_init_element( ss, se, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_internal_new_element( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE], void *user_data )
+{
+ struct lfds610_freelist_element
+ *fe;
+
+ assert( ss != NULL );
+ assert( se != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds610_freelist_guaranteed_pop( ss->fs, &fe );
+
+ if( fe == NULL )
+ se[LFDS610_STACK_POINTER] = NULL;
+
+ if( fe != NULL )
+ lfds610_stack_internal_init_element( ss, se, fe, user_data );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_internal_init_element( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE], struct lfds610_freelist_element *fe, void *user_data )
+{
+ assert( ss != NULL );
+ assert( se != NULL );
+ assert( fe != NULL );
+ // TRD : user_data can be any value in its range
+
+ lfds610_freelist_get_user_data_from_element( fe, (void **) &se[LFDS610_STACK_POINTER] );
+
+ se[LFDS610_STACK_COUNTER] = (struct lfds610_stack_element *) lfds610_abstraction_increment( (lfds610_atom_t *) &ss->aba_counter );
+
+ se[LFDS610_STACK_POINTER]->next[LFDS610_STACK_POINTER] = NULL;
+ se[LFDS610_STACK_POINTER]->next[LFDS610_STACK_COUNTER] = 0;
+ se[LFDS610_STACK_POINTER]->fe = fe;
+ se[LFDS610_STACK_POINTER]->user_data = user_data;
+
+ return;
+}
+
--- /dev/null
+#include "lfds610_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds610_stack_push( struct lfds610_stack_state *ss, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_stack_element
+ *se[LFDS610_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ // TRD : user_data can be NULL
+
+ lfds610_stack_internal_new_element_from_freelist( ss, se, user_data );
+
+ if( se[LFDS610_STACK_POINTER] == NULL )
+ return( 0 );
+
+ lfds610_stack_internal_push( ss, se );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds610_stack_guaranteed_push( struct lfds610_stack_state *ss, void *user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_stack_element
+ *se[LFDS610_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ // TRD : user_data can be NULL
+
+ /* TRD : this function allocates a new lfds610_freelist element and uses that
+ to push onto the lfds610_stack, guaranteeing success (unless malloc()
+ fails of course)
+ */
+
+ lfds610_stack_internal_new_element( ss, se, user_data );
+
+ // TRD : malloc failed
+ if( se[LFDS610_STACK_POINTER] == NULL )
+ return( 0 );
+
+ lfds610_stack_internal_push( ss, se );
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_internal_push( struct lfds610_stack_state *ss, struct lfds610_stack_element *se[LFDS610_STACK_PAC_SIZE] )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_stack_element
+ *original_se_next[LFDS610_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ assert( se != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ original_se_next[LFDS610_STACK_POINTER] = ss->top[LFDS610_STACK_POINTER];
+ original_se_next[LFDS610_STACK_COUNTER] = ss->top[LFDS610_STACK_COUNTER];
+
+ do
+ {
+ se[LFDS610_STACK_POINTER]->next[LFDS610_STACK_POINTER] = original_se_next[LFDS610_STACK_POINTER];
+ se[LFDS610_STACK_POINTER]->next[LFDS610_STACK_COUNTER] = original_se_next[LFDS610_STACK_COUNTER];
+ }
+ while( 0 == lfds610_abstraction_dcas((volatile lfds610_atom_t *) ss->top, (lfds610_atom_t *) se, (lfds610_atom_t *) original_se_next) );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds610_stack_pop( struct lfds610_stack_state *ss, void **user_data )
+{
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) struct lfds610_stack_element
+ *se[LFDS610_STACK_PAC_SIZE];
+
+ assert( ss != NULL );
+ assert( user_data != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ se[LFDS610_STACK_COUNTER] = ss->top[LFDS610_STACK_COUNTER];
+ se[LFDS610_STACK_POINTER] = ss->top[LFDS610_STACK_POINTER];
+
+ do
+ {
+ if( se[LFDS610_STACK_POINTER] == NULL )
+ return( 0 );
+ }
+ while( 0 == lfds610_abstraction_dcas((volatile lfds610_atom_t *) ss->top, (lfds610_atom_t *) se[LFDS610_STACK_POINTER]->next, (lfds610_atom_t *) se) );
+
+ *user_data = se[LFDS610_STACK_POINTER]->user_data;
+
+ lfds610_freelist_push( ss->fs, se[LFDS610_STACK_POINTER]->fe );
+
+ return( 1 );
+}
+
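+
+
+/* A minimal, illustrative usage sketch (not part of the library source);
+ it uses only the public stack functions defined in this directory and
+ declared in liblfds610.h, and omits all error handling :
+
+ struct lfds610_stack_state *ss;
+ void *user_data;
+
+ lfds610_stack_new( &ss, 10 );
+
+ lfds610_stack_push( ss, (void *) 0x01 );
+ lfds610_stack_push( ss, (void *) 0x02 );
+
+ while( lfds610_stack_pop(ss, &user_data) )
+ ;
+
+ lfds610_stack_delete( ss, NULL, NULL );
+*/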
--- /dev/null
+#include "lfds610_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_query( struct lfds610_stack_state *ss, enum lfds610_stack_query_type query_type, void *query_input, void *query_output )
+{
+ assert( ss != NULL );
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL
+ assert( query_output != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS610_STACK_QUERY_ELEMENT_COUNT:
+ assert( query_input == NULL );
+
+ lfds610_freelist_query( ss->fs, LFDS610_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );
+ break;
+
+ case LFDS610_STACK_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+
+ /* TRD : the validation info passed in is for the stack
+ it indicates the minimum and maximum number of elements
+ which should be present
+
+ we need to validate the freelist
+ and validate the stack
+
+ we cannot know the min/max for the freelist, given only
+ the min/max for the stack
+ */
+
+ lfds610_freelist_query( ss->fs, LFDS610_FREELIST_QUERY_VALIDATE, NULL, (enum lfds610_data_structure_validity *) query_output );
+
+ if( *(enum lfds610_data_structure_validity *) query_output == LFDS610_VALIDITY_VALID )
+ lfds610_stack_internal_validate( ss, (struct lfds610_validation_info *) query_input, (enum lfds610_data_structure_validity *) query_output, ((enum lfds610_data_structure_validity *) query_output)+1 );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds610_stack_internal_validate( struct lfds610_stack_state *ss, struct lfds610_validation_info *vi, enum lfds610_data_structure_validity *stack_validity, enum lfds610_data_structure_validity *freelist_validity )
+{
+ struct lfds610_stack_element
+ *se,
+ *se_slow,
+ *se_fast;
+
+ lfds610_atom_t
+ element_count = 0,
+ total_elements;
+
+ struct lfds610_validation_info
+ freelist_vi;
+
+ assert( ss != NULL );
+ // TRD : vi can be NULL
+ assert( stack_validity != NULL );
+
+ *stack_validity = LFDS610_VALIDITY_VALID;
+
+ se_slow = se_fast = (struct lfds610_stack_element *) ss->top[LFDS610_STACK_POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the stack
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the stack)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( se_slow != NULL )
+ do
+ {
+ se_slow = se_slow->next[LFDS610_STACK_POINTER];
+
+ if( se_fast != NULL )
+ se_fast = se_fast->next[LFDS610_STACK_POINTER];
+
+ if( se_fast != NULL )
+ se_fast = se_fast->next[LFDS610_STACK_POINTER];
+ }
+ while( se_slow != NULL and se_fast != se_slow );
+
+ if( se_fast != NULL and se_slow != NULL and se_fast == se_slow )
+ *stack_validity = LFDS610_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *stack_validity == LFDS610_VALIDITY_VALID and vi != NULL )
+ {
+ se = (struct lfds610_stack_element *) ss->top[LFDS610_STACK_POINTER];
+
+ while( se != NULL )
+ {
+ element_count++;
+ se = (struct lfds610_stack_element *) se->next[LFDS610_STACK_POINTER];
+ }
+
+ if( element_count < vi->min_elements )
+ *stack_validity = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element_count > vi->max_elements )
+ *stack_validity = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ /* TRD : now we validate the freelist
+
+ we may be able to check for the expected number of
+ elements in the freelist
+
+ if the caller has given us an expected min and max
+ number of elements for the stack, then the total number
+ of elements created in the freelist, minus the stack's
+ max and min respectively, gives us the expected min and
+ max number of elements in the freelist
+ */
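+
+ /* For example (illustrative figures only) : if the freelist was created
+ with 1000 elements in total and the caller expects between 100 and 200
+ elements in the stack, the freelist is expected to hold between
+ 1000-200 = 800 and 1000-100 = 900 elements
+ */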
+
+ if( vi != NULL )
+ {
+ lfds610_freelist_query( ss->fs, LFDS610_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );
+
+ freelist_vi.min_elements = total_elements - vi->max_elements;
+ freelist_vi.max_elements = total_elements - vi->min_elements;
+
+ lfds610_freelist_query( ss->fs, LFDS610_FREELIST_QUERY_VALIDATE, (void *) &freelist_vi, (void *) freelist_validity );
+ }
+
+ if( vi == NULL )
+ lfds610_freelist_query( ss->fs, LFDS610_FREELIST_QUERY_VALIDATE, NULL, (void *) freelist_validity );
+
+ return;
+}
+
--- /dev/null
+/***** public prototypes *****/
+#include "liblfds610.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define RAISED 1
+#define LOWERED 0
+
+#define NO_FLAGS 0x0
+
+/***** private prototypes *****/
+void *lfds610_liblfds_aligned_malloc( size_t size, size_t align_in_bytes );
+void lfds610_liblfds_aligned_free( void *memory );
+
+static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_cas( volatile lfds610_atom_t *destination, lfds610_atom_t exchange, lfds610_atom_t compare );
+static LFDS610_INLINE unsigned char lfds610_abstraction_dcas( volatile lfds610_atom_t *destination, lfds610_atom_t *exchange, lfds610_atom_t *compare );
+static LFDS610_INLINE lfds610_atom_t lfds610_abstraction_increment( volatile lfds610_atom_t *value );
+
+/***** inlined code *****/
+#include "lfds610_abstraction/lfds610_abstraction_cas.c"
+#include "lfds610_abstraction/lfds610_abstraction_dcas.c"
+#include "lfds610_abstraction/lfds610_abstraction_increment.c"
+
--- /dev/null
+building test
+=============
+
+Windows (user-mode)
+===================
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition (or
+ later versions) to load "liblfds.sln". The "Win32" platform is x86,
+ the "x64" platform is x64. The test programme provides the "Release"
+ and "Debug" targets. The other targets ("Release DLL", "Release Lib",
+ "Debug DLL" and "Debug Lib") are carried over will-nilly from the liblds
+ library.
+
+ All builds will work, but DLL builds will require the DLL from liblfds
+ to be placed into a location where the test executable can find it (e.g.
+ the same directory).
+
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously
+ you'll need to have run setenv.bat or the appropriate vcvars*.bat first;
+ you can build for x64/64-bit and x86/32-bit - just run the correct batch
+ file).
+
+ If liblfds has been built as a DLL, the DLL from liblfds needs to be
+ placed into a location where the test executable can find it (e.g. the
+ same directory).
+
+ Targets are "rel", "dbg" and "clean". You need to clean between switching
+ targets.
+
+Windows (kernel)
+================
+No build supported, since this is a command line utility.
+
+Linux
+=====
+Use GNUmake to run "makefile.linux". Targets are "rel", "dbg" and
+"clean". You need to clean between switching targets.
+
+If liblfds has been built as a shared object, the shared object file from
+liblfds will need to be placed somewhere the test executable can find it.
+
+A convenient solution is to place the shared object file in the same
+directory as the test executable and set the environment variable
+"LD_LIBRARY_PATH" to ".", e.g. in bash:
+
+export LD_LIBRARY_PATH=.
+
+Remember to unset the variable when you have finished testing, or the shell
+will continue to search the current directory for shared object files.
+
+unset LD_LIBRARY_PATH
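+
+Alternatively, the variable can be scoped to a single run (executed from the
+directory containing the test executable, which the makefiles name "test"),
+in which case no unset is needed:
+
+LD_LIBRARY_PATH=. ./test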
+
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = ../liblfds610/inc
+LIBDIR = ../liblfds610/bin
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>/dev/null 2>/dev/null
+
+##### sources, objects and libraries #####
+BINNAME = test
+BINARY = $(BINDIR)/$(BINNAME)
+SRCDIRS = .
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS = -lpthread -lc -lm
+USRLIBS = -llfds610
+
+##### CPU variants #####
+UNAME = $(shell uname -m)
+GCCARCH = -march=$(UNAME)
+
+ifeq ($(UNAME),x86_64)
+ GCCARCH = -march=core2
+endif
+
+ifeq ($(findstring arm,$(UNAME)),arm)
+ GCCARCH = -march=armv6k -marm
+endif
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+DG = gcc
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)"
+
+CC = gcc
+CFBASE = -Wall -Wno-unknown-pragmas -std=c99 $(GCCARCH) -pthread -c -I"$(SRCDIR)" -I"$(INCDIR)"
+CFREL = -O2 -Wno-strict-aliasing
+CFDBG = -O0 -g
+
+LD = gcc
+LFBASE = -L"$(LIBDIR)"
+LFREL = -O2 -s
+LFDBG = -O0 -g
+
+##### variants #####
+CFLAGS = $(CFBASE) $(CFDBG)
+LFLAGS = $(LFBASE) $(LFDBG)
+
+ifeq ($(MAKECMDGOALS),rel)
+ CFLAGS = $(CFBASE) $(CFREL)
+ LFLAGS = $(LFBASE) $(LFREL)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS) -o $@ $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) -o $(BINARY) $(LFLAGS) $(OBJECTS) $(USRLIBS) $(SYSLIBS)
+ chmod +x $(BINARY)
+
+##### phony #####
+.PHONY : clean rel dbg
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d
+
+rel : $(BINARY)
+dbg : $(BINARY)
+
+##### dependencies #####
+-include $(patsubst %.o,%.d,$(OBJECTS))
+
--- /dev/null
+##### paths #####
+BINDIR = bin
+INCDIR = ../liblfds610/inc
+LIBDIR = ../liblfds610/bin
+OBJDIR = obj
+SRCDIR = src
+
+##### misc #####
+QUIETLY = 1>nul 2>nul
+
+##### sources, objects and libraries #####
+BINNAME = test
+BINARY = $(BINDIR)\$(BINNAME).exe
+SRCDIRS = .
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS = kernel32.lib
+USRLIBS = liblfds610.lib
+
+##### tools #####
+MAKE = make
+MFLAGS =
+
+CC = cl
+CFBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCLUDE)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /D UNICODE /D _UNICODE /DWIN32_LEAN_AND_MEAN /D_CRT_SECURE_NO_WARNINGS
+CFREL = /Ox /DNDEBUG /MT
+CFDBG = /Od /Gm /Zi /D_DEBUG /MTd
+
+LD = link
+LFBASE = "/libpath:$(LIB)" "/libpath:$(LIBDIR)" /nologo /subsystem:console /nodefaultlib /nxcompat /wx
+LFREL = /incremental:no
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+
+##### variants #####
+CFLAGS = $(CFBASE) $(CFDBG)
+LFLAGS = $(LFBASE) $(LFDBG)
+CLIB = libcmtd.lib
+
+ifeq ($(MAKECMDGOALS),rel)
+ CFLAGS = $(CFBASE) $(CFREL)
+ LFLAGS = $(LFBASE) $(LFREL)
+ CLIB = libcmt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS) "/Fo$@" $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)
+
+##### phony #####
+.PHONY : clean rel dbg
+
+clean :
+ @erase /Q $(OBJDIR)\*.obj $(BINDIR)\$(BINNAME).* $(QUIETLY)
+
+rel : $(BINARY)
+dbg : $(BINARY)
+
--- /dev/null
+/***** defines *****/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ #include <windows.h>
+ typedef HANDLE thread_state_t;
+ typedef DWORD thread_return_t;
+ #define CALLING_CONVENTION WINAPI
+#endif
+
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ #include <wdm.h>
+ typedef HANDLE thread_state_t;
+ typedef VOID thread_return_t;
+ #define CALLING_CONVENTION
+#endif
+
+#if (defined __unix__ && defined __GNUC__)
+ /* TRD : any UNIX on any CPU with GCC
+
+ __unix__ indicates Solaris, Linux, HPUX, etc
+ __GNUC__ indicates GCC
+ */
+ #include <unistd.h>
+ #include <pthread.h>
+ #include <sched.h>
+ typedef pthread_t thread_state_t;
+ typedef void * thread_return_t;
+ #define CALLING_CONVENTION
+#endif
+
+typedef thread_return_t (CALLING_CONVENTION *thread_function_t)( void *thread_user_state );
+
+/***** public prototypes *****/
+unsigned int abstraction_cpu_count( void );
+int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state );
+void abstraction_thread_wait( thread_state_t thread_state );
+
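+
+/* A minimal, illustrative usage sketch (not part of the test source); it
+ shows how the abstraction types and prototypes above fit together, with
+ all error handling omitted :
+
+ thread_return_t CALLING_CONVENTION worker( void *thread_user_state )
+ {
+ return( (thread_return_t) 0 );
+ }
+
+ thread_state_t ts;
+
+ abstraction_thread_start( &ts, 0, worker, NULL );
+ abstraction_thread_wait( ts );
+*/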
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ unsigned int abstraction_cpu_count()
+ {
+ SYSTEM_INFO
+ si;
+
+ GetNativeSystemInfo( &si );
+
+ return( (unsigned int) si.dwNumberOfProcessors );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ unsigned int abstraction_cpu_count()
+ {
+ unsigned int
+ active_processor_count;
+
+ active_processor_count = KeQueryActiveProcessorCount( NULL );
+
+ return( active_processor_count );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __linux__ && defined __GNUC__)
+
+ /* TRD : Linux on any CPU with GCC
+
+ I believe this function is Linux specific; it varies by UNIX flavour
+
+ __linux__ indicates Linux
+ __GNUC__ indicates GCC
+ */
+
+ unsigned int abstraction_cpu_count()
+ {
+ long int
+ cpu_count;
+
+ cpu_count = sysconf( _SC_NPROCESSORS_ONLN );
+
+ return( (unsigned int) cpu_count );
+ }
+
+#endif
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )
+ {
+ int
+ rv = 0;
+
+ DWORD
+ thread_id;
+
+ DWORD_PTR
+ affinity_mask,
+ result;
+
+ assert( thread_state != NULL );
+ // TRD : cpu can be any value in its range
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ affinity_mask = (DWORD_PTR) 1 << cpu;
+
+ *thread_state = CreateThread( NULL, 0, thread_function, thread_user_state, NO_FLAGS, &thread_id );
+
+ result = SetThreadAffinityMask( *thread_state, affinity_mask );
+
+ if( *thread_state != NULL and result != 0 )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )
+ {
+ int
+ rv = 0;
+
+ KAFFINITY
+ affinity_mask;
+
+ NTSTATUS
+ nts_create,
+ nts_affinity;
+
+ assert( thread_state != NULL );
+ // TRD : cpu can be any value in its range
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ affinity_mask = (KAFFINITY) 1 << cpu;
+
+ nts_create = PsCreateSystemThread( thread_state, THREAD_ALL_ACCESS, NULL, NULL, NULL, thread_function, thread_user_state );
+
+ nts_affinity = ZwSetInformationThread( *thread_state, ThreadAffinityMask, &affinity_mask, sizeof(KAFFINITY) );
+
+ if( nts_create == STATUS_SUCCESS and nts_affinity == STATUS_SUCCESS )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __unix__)
+
+ /* TRD : any UNIX on any CPU with any compiler
+
+ I assume pthreads is available on any UNIX.
+
+ __unix__ indicates Solaris, Linux, HPUX, etc
+ */
+
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )
+ {
+ int
+ rv = 0,
+ rv_create;
+
+ pthread_attr_t
+ attr;
+
+ cpu_set_t
+ cpuset;
+
+ assert( thread_state != NULL );
+ // TRD : cpu can be any value in its range
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ pthread_attr_init( &attr );
+
+ CPU_ZERO( &cpuset );
+ CPU_SET( cpu, &cpuset );
+ pthread_attr_setaffinity_np( &attr, sizeof(cpuset), &cpuset );
+
+ rv_create = pthread_create( thread_state, &attr, thread_function, thread_user_state );
+
+ if( rv_create == 0 )
+ rv = 1;
+
+ pthread_attr_destroy( &attr );
+
+ return( rv );
+ }
+
+#endif
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ !WIN_KERNEL_BUILD indicates Windows user-mode
+ */
+
+ void abstraction_thread_wait( thread_state_t thread_state )
+ {
+ WaitForSingleObject( thread_state, INFINITE );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)
+
+ /* TRD : any Windows on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ WIN_KERNEL_BUILD indicates Windows kernel
+ */
+
+ void abstraction_thread_wait( thread_state_t thread_state )
+ {
+ KeWaitForSingleObject( thread_state, Executive, KernelMode, FALSE, NULL );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __unix__)
+
+ /* TRD : any UNIX on any CPU with any compiler
+
+ I assume pthreads is available on any UNIX.
+
+ __unix__ indicates Solaris, Linux, HPUX, etc
+ */
+
+ void abstraction_thread_wait( thread_state_t thread_state )
+ {
+ pthread_join( thread_state, NULL );
+
+ return;
+ }
+
+#endif
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds610_freelist( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds610_freelist_state
+ *fs;
+
+ struct lfds610_freelist_benchmark
+ *fb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds610_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the freelist
+
+ the benchmark is to have a single freelist
+ where a worker thread busy-works popping and then pushing
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ fb = (struct lfds610_freelist_benchmark *) malloc( sizeof(struct lfds610_freelist_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Freelist Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS610_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds610_freelist_new( &fs, 1000, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (fb+loop)->fs = fs;
+ (fb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds610_freelist_thread_pop_and_push, fb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (fb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (fb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
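+
+ // For example (illustrative figures only) : if one thread alone performed 10M operations and four threads together perform 30M, scalability is 30M / (10M * 4) = 0.75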
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds610_freelist_delete( fs, NULL, NULL );
+ }
+
+ free( fb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds610_freelist_thread_pop_and_push( void *freelist_benchmark )
+{
+ struct lfds610_freelist_benchmark
+ *fb;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( freelist_benchmark != NULL );
+
+ fb = (struct lfds610_freelist_benchmark *) freelist_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_freelist_pop( fb->fs, &fe );
+ lfds610_freelist_push( fb->fs, fe );
+
+ fb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds610_queue( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds610_queue_state
+ *qs;
+
+ struct lfds610_queue_benchmark
+ *qb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds610_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the queue
+
+ the benchmark is to have a single queue
+ where a worker thread busy-works dequeuing and then queuing
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ qb = (struct lfds610_queue_benchmark *) malloc( sizeof(struct lfds610_queue_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Queue Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS610_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds610_queue_new( &qs, 1000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qb+loop)->qs = qs;
+ (qb+loop)->operation_count = 0;
+ }
+
+ // TRD : populate the queue (we don't actually use the user data)
+ for( loop = 0 ; loop < 500 ; loop++ )
+ lfds610_queue_enqueue( qs, (void *) (lfds610_atom_t) loop );
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds610_queue_thread_delfds610_queue_and_enqueue, qb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (qb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (qb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds610_queue_delete( qs, NULL, NULL );
+ }
+
+ free( qb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds610_queue_thread_delfds610_queue_and_enqueue( void *queue_benchmark )
+{
+ struct lfds610_queue_benchmark
+ *qb;
+
+ void
+ *user_data;
+
+ time_t
+ start_time;
+
+ assert( queue_benchmark != NULL );
+
+ qb = (struct lfds610_queue_benchmark *) queue_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_queue_dequeue( qb->qs, &user_data );
+ lfds610_queue_enqueue( qb->qs, user_data );
+
+ qb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds610_ringbuffer( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ struct lfds610_ringbuffer_benchmark
+ *rb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds610_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the ringbuffer
+
+ the benchmark is to have a single ringbuffer
+ where a worker thread busy-works writing and then reading
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ rb = (struct lfds610_ringbuffer_benchmark *) malloc( sizeof(struct lfds610_ringbuffer_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Ringbuffer Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS610_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds610_ringbuffer_new( &rs, 1000, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rb+loop)->rs = rs;
+ (rb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds610_ringbuffer_thread_write_and_read, rb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (rb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (rb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds610_ringbuffer_delete( rs, NULL, NULL );
+ }
+
+ free( rb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds610_ringbuffer_thread_write_and_read( void *ringbuffer_benchmark )
+{
+ struct lfds610_ringbuffer_benchmark
+ *rb;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( ringbuffer_benchmark != NULL );
+
+ rb = (struct lfds610_ringbuffer_benchmark *) ringbuffer_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_ringbuffer_get_write_element( rb->rs, &fe, NULL );
+ lfds610_ringbuffer_put_write_element( rb->rs, fe );
+
+ lfds610_ringbuffer_get_read_element( rb->rs, &fe );
+ lfds610_ringbuffer_put_read_element( rb->rs, fe );
+
+ rb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void benchmark_lfds610_stack( void )
+{
+ unsigned int
+ loop,
+ thread_count,
+ cpu_count;
+
+ struct lfds610_stack_state
+ *ss;
+
+ struct lfds610_stack_benchmark
+ *sb;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds610_atom_t
+ total_operations_for_full_test_for_all_cpus,
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;
+
+ double
+ mean_operations_per_second_per_cpu,
+ difference_per_second_per_cpu,
+ total_difference_per_second_per_cpu,
+ std_dev_per_second_per_cpu,
+ scalability;
+
+ /* TRD : here we benchmark the stack
+
+ the benchmark is to have a single stack
+ where a worker thread busy-works pushing then popping
+ */
+
+ cpu_count = abstraction_cpu_count();
+
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );
+
+ sb = (struct lfds610_stack_benchmark *) malloc( sizeof(struct lfds610_stack_benchmark) * cpu_count );
+
+ // TRD : print the benchmark ID and CSV header
+ printf( "\n"
+ "Release %s Stack Benchmark #1\n"
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS610_RELEASE_NUMBER_STRING );
+
+ // TRD : we run CPU count times for scalability
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )
+ {
+ // TRD : initialisation
+ lfds610_stack_new( &ss, 1000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (sb+loop)->ss = ss;
+ (sb+loop)->operation_count = 0;
+ }
+
+ // TRD : main test
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds610_stack_thread_push_and_pop, sb+loop );
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : post test math
+ total_operations_for_full_test_for_all_cpus = 0;
+ total_difference_per_second_per_cpu = 0;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ total_operations_for_full_test_for_all_cpus += (sb+loop)->operation_count;
+
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;
+
+ if( thread_count == 1 )
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;
+
+ for( loop = 0 ; loop < thread_count ; loop++ )
+ {
+ difference_per_second_per_cpu = ((double) (sb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;
+ }
+
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );
+
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);
+
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );
+
+ // TRD : cleanup
+ lfds610_stack_delete( ss, NULL, NULL );
+ }
+
+ free( sb );
+
+ free( thread_handles );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION benchmark_lfds610_stack_thread_push_and_pop( void *stack_benchmark )
+{
+ struct lfds610_stack_benchmark
+ *sb;
+
+ void
+ *user_data = NULL;
+
+ time_t
+ start_time;
+
+ assert( stack_benchmark != NULL );
+
+ sb = (struct lfds610_stack_benchmark *) stack_benchmark;
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_stack_push( sb->ss, user_data );
+ lfds610_stack_pop( sb->ss, &user_data );
+
+ sb->operation_count += 2;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** ANSI includes *****/
+/* TRD : _GNU_SOURCE is required by sched.h for pthread_attr_setaffinity_np, CPU_ZERO and CPU_SET
+ however it has to be defined very early, as even the ANSI headers pull in material
+ which checks _GNU_SOURCE and which appears to be protected against multiple inclusion;
+ this means that if you define it too late, it is not seen, because those headers
+ have already been parsed with _GNU_SOURCE unset
+*/
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <math.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+/***** internal includes *****/
+#include "abstraction.h"
+
+/***** external includes *****/
+#include "liblfds610.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define RAISED 1
+#define LOWERED 0
+
+#define NO_FLAGS 0x0
+
+/***** enums *****/
+enum lfds610_test_operation
+{
+ UNKNOWN,
+ HELP,
+ TEST,
+ BENCHMARK
+};
+
+/***** structs *****/
+#include "structures.h"
+
+/***** prototypes *****/
+int main( int argc, char **argv );
+
+void internal_display_test_name( char *test_name );
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... );
+void internal_display_lfds610_data_structure_validity( enum lfds610_data_structure_validity dvs );
+
+void benchmark_lfds610_freelist( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds610_freelist_thread_pop_and_push( void *freelist_benchmark );
+
+void benchmark_lfds610_queue( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds610_queue_thread_delfds610_queue_and_enqueue( void *queue_benchmark );
+
+void benchmark_lfds610_ringbuffer( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds610_ringbuffer_thread_write_and_read( void *ringbuffer_benchmark );
+
+void benchmark_lfds610_stack( void );
+ thread_return_t CALLING_CONVENTION benchmark_lfds610_stack_thread_push_and_pop( void *stack_benchmark );
+
+void test_lfds610_abstraction( void );
+ void abstraction_test_increment( void );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter );
+ void abstraction_test_cas( void );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_cas( void *abstraction_test_cas_state );
+ void abstraction_test_dcas( void );
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state );
+
+void test_lfds610_freelist( void );
+ void freelist_test_internal_popping( void );
+ int freelist_test_internal_popping_init( void **user_data, void *user_state );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state );
+ void freelist_test_internal_pushing( void );
+ int freelist_test_internal_pushing_init( void **user_data, void *user_state );
+ void freelist_test_internal_pushing_delete( void *user_data, void *user_state );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state );
+ void freelist_test_internal_popping_and_pushing( void );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state );
+ void freelist_test_internal_rapid_popping_and_pushing( void );
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds610_freelist_state );
+
+void test_lfds610_queue( void );
+ void queue_test_enqueuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state );
+ void queue_test_dequeuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state );
+ void queue_test_enqueuing_and_dequeuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );
+ void queue_test_rapid_enqueuing_and_dequeuing( void );
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );
+
+void test_lfds610_ringbuffer( void );
+ void ringbuffer_test_reading( void );
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state );
+ void ringbuffer_test_writing( void );
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state );
+ void ringbuffer_test_reading_and_writing( void );
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state );
+
+void test_lfds610_slist( void );
+ void test_slist_new_delete_get( void );
+ thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_new_head_and_next( void *slist_test_state );
+ thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_delete_and_get( void *slist_test_state );
+ void test_slist_get_set_user_data( void );
+ thread_return_t CALLING_CONVENTION slist_test_internal_thread_get_set_user_data( void *slist_test_state );
+ void test_slist_delete_all_elements( void );
+
+void test_lfds610_stack( void );
+ void stack_test_internal_popping( void );
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping( void *stack_test_popping_state );
+ void stack_test_internal_pushing( void );
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_pushing( void *stack_test_pushing_state );
+ void stack_test_internal_popping_and_pushing( void );
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_popping( void *stack_test_popping_and_pushing_state );
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_pushing( void *stack_test_popping_and_pushing_state );
+ void stack_test_internal_rapid_popping_and_pushing( void );
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_rapid_popping_and_pushing( void *stack_state );
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+int main( int argc, char **argv )
+{
+ enum lfds610_test_operation
+ operation = UNKNOWN;
+
+ unsigned int
+ loop,
+ iterations = 1;
+
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ if( argc == 1 or argc >= 4 )
+ operation = HELP;
+
+ if( operation == UNKNOWN )
+ {
+ if( 0 == strcmp(*(argv+1), "test") )
+ {
+ operation = TEST;
+
+ // TRD : sscanf() may fail, but iterations is initialised to 1, so it's okay
+ if( argc == 3 )
+ sscanf( *(argv+2), "%u", &iterations );
+ }
+
+ if( 0 == strcmp(*(argv+1), "benchmark") )
+ {
+ operation = BENCHMARK;
+
+ // TRD : sscanf() may fail, but iterations is initialised to 1, so it's okay
+ if( argc == 3 )
+ sscanf( *(argv+2), "%u", &iterations );
+ }
+ }
+
+ switch( operation )
+ {
+ case UNKNOWN:
+ case HELP:
+ printf( "test [test|benchmark] [iterations]\n"
+ " test : run the test suite\n"
+ " benchmark : run the benchmark suite\n"
+ " iterations : optional, default is 1\n" );
+ break;
+
+ case TEST:
+ for( loop = 1 ; loop < iterations+1 ; loop++ )
+ {
+ printf( "\n"
+ "Test Iteration %02u\n"
+ "=================\n", loop );
+
+ test_lfds610_abstraction();
+ test_lfds610_freelist();
+ test_lfds610_queue();
+ test_lfds610_ringbuffer();
+ test_lfds610_slist();
+ test_lfds610_stack();
+ }
+ break;
+
+ case BENCHMARK:
+ for( loop = 1 ; loop < iterations+1 ; loop++ )
+ {
+ printf( "\n"
+ "Benchmark Iteration %02u\n"
+ "========================\n", loop );
+
+ benchmark_lfds610_freelist();
+ benchmark_lfds610_queue();
+ benchmark_lfds610_ringbuffer();
+ benchmark_lfds610_stack();
+ }
+ break;
+ }
+
+ return( EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void internal_display_test_name( char *test_name )
+{
+ assert( test_name != NULL );
+
+ printf( "%s...", test_name );
+ fflush( stdout );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... )
+{
+ va_list
+ va;
+
+ int
+ passed_flag = RAISED;
+
+ unsigned int
+ loop;
+
+ char
+ *name;
+
+ enum lfds610_data_structure_validity
+ dvs;
+
+ // TRD : number_name_dvs_pairs can be any value in its range
+
+ va_start( va, number_name_dvs_pairs );
+
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )
+ {
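+ // on this first pass, name is fetched only to keep the va_list advancing; only dvs is checked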
+ name = va_arg( va, char * );
+ dvs = va_arg( va, enum lfds610_data_structure_validity );
+
+ if( dvs != LFDS610_VALIDITY_VALID )
+ {
+ passed_flag = LOWERED;
+ break;
+ }
+ }
+
+ va_end( va );
+
+ if( passed_flag == RAISED )
+ puts( "passed" );
+
+ if( passed_flag == LOWERED )
+ {
+ printf( "failed (" );
+
+ va_start( va, number_name_dvs_pairs );
+
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )
+ {
+ name = va_arg( va, char * );
+ dvs = va_arg( va, enum lfds610_data_structure_validity );
+
+ printf( "%s ", name );
+ internal_display_lfds610_data_structure_validity( dvs );
+
+ if( loop+1 < number_name_dvs_pairs )
+ printf( ", " );
+ }
+
+ va_end( va );
+
+ printf( ")\n" );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_display_lfds610_data_structure_validity( enum lfds610_data_structure_validity dvs )
+{
+ char
+ *string = NULL;
+
+ switch( dvs )
+ {
+ case LFDS610_VALIDITY_VALID:
+ string = "valid";
+ break;
+
+ case LFDS610_VALIDITY_INVALID_LOOP:
+ string = "invalid - loop detected";
+ break;
+
+ case LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS:
+ string = "invalid - missing elements";
+ break;
+
+ case LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS:
+ string = "invalid - additional elements";
+ break;
+
+ case LFDS610_VALIDITY_INVALID_TEST_DATA:
+ string = "invalid - invalid test data";
+ break;
+ }
+
+ printf( "%s", string );
+
+ return;
+}
+
--- /dev/null
+/***** structs *****/
+#pragma pack( push, LFDS610_ALIGN_DOUBLE_POINTER )
+
+/***** abstraction tests *****/
+struct abstraction_test_cas_state
+{
+ volatile lfds610_atom_t
+ *shared_counter;
+
+ lfds610_atom_t
+ local_counter;
+};
+
+struct abstraction_test_dcas_state
+{
+ volatile lfds610_atom_t
+ *shared_counter;
+
+ lfds610_atom_t
+ local_counter;
+};
+
+/***** freelist tests *****/
+struct freelist_test_popping_state
+{
+ struct lfds610_freelist_state
+ *fs,
+ *fs_thread_local;
+};
+
+struct freelist_test_pushing_state
+{
+ lfds610_atom_t
+ *count,
+ thread_number;
+
+ struct lfds610_freelist_state
+ *source_fs,
+ *fs;
+};
+
+struct freelist_test_popping_and_pushing_state
+{
+ struct lfds610_freelist_state
+ *local_fs,
+ *fs;
+};
+
+struct freelist_test_counter_and_thread_number
+{
+ lfds610_atom_t
+ thread_number;
+
+ unsigned long long int
+ counter;
+};
+
+/***** queue tests *****/
+struct queue_test_enqueuing_state
+{
+ struct lfds610_queue_state
+ *qs;
+
+ lfds610_atom_t
+ counter;
+};
+
+struct queue_test_dequeuing_state
+{
+ struct lfds610_queue_state
+ *qs;
+
+ int
+ error_flag;
+};
+
+struct queue_test_enqueuing_and_dequeuing_state
+{
+ struct lfds610_queue_state
+ *qs;
+
+ lfds610_atom_t
+ counter,
+ thread_number,
+ *per_thread_counters;
+
+ unsigned int
+ cpu_count;
+
+ int
+ error_flag;
+};
+
+struct queue_test_rapid_enqueuing_and_dequeuing_state
+{
+ struct lfds610_queue_state
+ *qs;
+
+ lfds610_atom_t
+ counter;
+};
+
+/***** ringbuffer tests *****/
+struct ringbuffer_test_reading_state
+{
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ int
+ error_flag;
+
+ lfds610_atom_t
+ read_count;
+};
+
+struct ringbuffer_test_writing_state
+{
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ lfds610_atom_t
+ write_count;
+};
+
+struct ringbuffer_test_reading_and_writing_state
+{
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ lfds610_atom_t
+ counter,
+ *per_thread_counters;
+
+ unsigned int
+ cpu_count;
+
+ int
+ error_flag;
+};
+
+/***** slist tests *****/
+struct slist_test_state
+{
+ struct lfds610_slist_state
+ *ss;
+
+ size_t
+ create_count,
+ delete_count;
+
+ lfds610_atom_t
+ thread_and_count;
+};
+
+/***** stack tests *****/
+struct stack_test_popping_state
+{
+ struct lfds610_stack_state
+ *ss,
+ *ss_thread_local;
+};
+
+struct stack_test_pushing_state
+{
+ lfds610_atom_t
+ thread_number;
+
+ struct lfds610_stack_state
+ *ss;
+};
+
+struct stack_test_popping_and_pushing_state
+{
+ struct lfds610_stack_state
+ *local_ss,
+ *ss;
+};
+
+struct stack_test_counter_and_thread_number
+{
+ lfds610_atom_t
+ thread_number,
+ counter;
+};
+
+/***** freelist benchmarks *****/
+struct lfds610_freelist_benchmark
+{
+ struct lfds610_freelist_state
+ *fs;
+
+ lfds610_atom_t
+ operation_count;
+};
+
+/***** queue benchmarks *****/
+struct lfds610_queue_benchmark
+{
+ struct lfds610_queue_state
+ *qs;
+
+ lfds610_atom_t
+ operation_count;
+};
+
+/***** ringbuffer benchmarks *****/
+struct lfds610_ringbuffer_benchmark
+{
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ lfds610_atom_t
+ operation_count;
+};
+
+/***** stack benchmarks *****/
+struct lfds610_stack_benchmark
+{
+ struct lfds610_stack_state
+ *ss;
+
+ lfds610_atom_t
+ operation_count;
+};
+
+#pragma pack( pop )
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds610_abstraction( void )
+{
+ printf( "\n"
+ "Abstraction Tests\n"
+ "=================\n" );
+
+ abstraction_test_increment();
+ abstraction_test_cas();
+ abstraction_test_dcas();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_increment( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds610_atom_t
+ shared_counter,
+ atomic_shared_counter;
+
+ /* TRD : here we test lfds610_abstraction_increment
+
+ first, we run one thread per CPU where each thread increments
+ a shared counter 10,000,000 times - however, this first test
+ does NOT use atomic increment; it uses "++"
+
+ second, we repeat the exercise, but this time using
+ lfds610_abstraction_increment()
+
+ if the final value in the first test is less than (10,000,000*cpu_count)
+ then the system is sensitive to non-atomic increments; this means if
+ our atomic version of the test passes, we can have some degree of confidence
+ that it works
+
+ if the final value in the first test is in fact correct, then we can't know
+ that our atomic version has changed anything
+
+ and of course if the final value in the atomic test is wrong, we know things
+ are broken
+ */
+
+ internal_display_test_name( "Atomic increment" );
+
+ cpu_count = abstraction_cpu_count();
+
+ shared_counter = 0;
+ atomic_shared_counter = 0;
+
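+ // store barrier so the zeroed counters are visible to the worker threads, which issue a load barrier on startup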
+ LFDS610_BARRIER_STORE;
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ // TRD : non-atomic
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_increment, &shared_counter );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : atomic
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_atomic_increment, &atomic_shared_counter );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ if( shared_counter < (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
+ puts( "passed" );
+
+ if( shared_counter == (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
+ puts( "indeterminate" );
+
+ if( atomic_shared_counter < (10000000 * cpu_count) )
+ puts( "failed" );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter )
+{
+ assert( shared_counter != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ lfds610_liblfds_abstraction_test_helper_increment_non_atomic( shared_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter )
+{
+ assert( shared_counter != NULL );
+
+ LFDS610_BARRIER_LOAD;
+
+ lfds610_liblfds_abstraction_test_helper_increment_atomic( shared_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_cas( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct abstraction_test_cas_state
+ *atcs;
+
+ LFDS610_ALIGN(LFDS610_ALIGN_SINGLE_POINTER) volatile lfds610_atom_t
+ shared_counter = 0;
+
+ lfds610_atom_t
+ local_total = 0;
+
+ /* TRD : here we test lfds610_abstraction_cas
+
+ we run one thread per CPU
+ we use lfds610_abstraction_cas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ the threads run for ten seconds
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ internal_display_test_name( "Atomic CAS" );
+
+ cpu_count = abstraction_cpu_count();
+
+ atcs = malloc( sizeof(struct abstraction_test_cas_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (atcs+loop)->shared_counter = &shared_counter;
+ (atcs+loop)->local_counter = 0;
+ }
+
+ // store barrier so the initialised state is visible to the worker threads, mirroring the increment and DCAS tests
+ LFDS610_BARRIER_STORE;
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_cas, atcs+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ local_total += (atcs+loop)->local_counter;
+
+ if( local_total == shared_counter )
+ puts( "passed" );
+
+ if( local_total != shared_counter )
+ puts( "failed" );
+
+ // TRD : cleanup
+ free( atcs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_cas( void *abstraction_test_cas_state )
+{
+ struct abstraction_test_cas_state
+ *atcs;
+
+ assert( abstraction_test_cas_state != NULL );
+
+ atcs = (struct abstraction_test_cas_state *) abstraction_test_cas_state;
+
+ LFDS610_BARRIER_LOAD;
+
+ lfds610_liblfds_abstraction_test_helper_cas( atcs->shared_counter, &atcs->local_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_dcas( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct abstraction_test_dcas_state
+ *atds;
+
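+ // DCAS operates on two adjacent machine words, hence the two-element array with double-pointer alignment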
+ LFDS610_ALIGN(LFDS610_ALIGN_DOUBLE_POINTER) volatile lfds610_atom_t
+ shared_counter[2] = { 0, 0 };
+
+ lfds610_atom_t
+ local_total = 0;
+
+ /* TRD : here we test lfds610_abstraction_dcas
+
+ we run one thread per CPU
+ we use lfds610_abstraction_dcas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ the threads run for ten seconds
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ internal_display_test_name( "Atomic DCAS" );
+
+ cpu_count = abstraction_cpu_count();
+
+ atds = malloc( sizeof(struct abstraction_test_dcas_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (atds+loop)->shared_counter = shared_counter;
+ (atds+loop)->local_counter = 0;
+ }
+
+ LFDS610_BARRIER_STORE;
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_dcas, atds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ local_total += (atds+loop)->local_counter;
+
+ if( local_total == shared_counter[0] )
+ puts( "passed" );
+
+ if( local_total != shared_counter[0] )
+ puts( "failed" );
+
+ // TRD : cleanup
+ free( atds );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state )
+{
+ struct abstraction_test_dcas_state
+ *atds;
+
+ assert( abstraction_test_dcas_state != NULL );
+
+ atds = (struct abstraction_test_dcas_state *) abstraction_test_dcas_state;
+
+ LFDS610_BARRIER_LOAD;
+
+ lfds610_liblfds_abstraction_test_helper_dcas( atds->shared_counter, &atds->local_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds610_freelist( void )
+{
+ printf( "\n"
+ "Freelist Tests\n"
+ "==============\n" );
+
+ freelist_test_internal_popping();
+ freelist_test_internal_pushing();
+ freelist_test_internal_popping_and_pushing();
+ freelist_test_internal_rapid_popping_and_pushing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_popping( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ lfds610_atom_t
+ count = 0;
+
+ thread_state_t
+ *thread_handles;
+
+ enum lfds610_data_structure_validity
+ dvs = LFDS610_VALIDITY_VALID;
+
+ struct lfds610_freelist_state
+ *fs;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ struct freelist_test_popping_state
+ *ftps;
+
+ unsigned int
+ *found_count;
+
+ /* TRD : we create a freelist with 1,000,000 elements
+
+ the creation function runs in a single thread and creates
+ and pushes those elements onto the freelist
+
+ each element contains a void pointer which is its element number
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each popped element is pushed onto a thread-local freelist
+
+ the threads run till the source freelist is empty
+
+ we then check the thread-local freelists
+ we should find we have every element
+
+ then tidy up
+ */
+
+ internal_display_test_name( "Popping" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_freelist_new( &fs, 1000000, freelist_test_internal_popping_init, &count );
+ ftps = malloc( sizeof(struct freelist_test_popping_state) * cpu_count );
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (ftps+loop)->fs = fs;
+ lfds610_freelist_new( &(ftps+loop)->fs_thread_local, 0, NULL, NULL );
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping, ftps+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now we check the thread-local freelists
+ found_count = malloc( sizeof(unsigned int) * 1000000 );
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ *(found_count+loop) = 0;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ while( lfds610_freelist_pop((ftps+loop)->fs_thread_local, &fe) )
+ {
+ lfds610_freelist_get_user_data_from_element( fe, (void **) &count );
+ (*(found_count+count))++;
+ lfds610_freelist_push( fs, fe );
+ }
+ }
+
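+ // every element number from 0 to 999,999 should have been seen exactly once across the thread-local freelists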
+ for( loop = 0 ; loop < 1000000 and dvs == LFDS610_VALIDITY_VALID ; loop++ )
+ {
+ if( *(found_count+loop) == 0 )
+ dvs = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( *(found_count+loop) > 1 )
+ dvs = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ // TRD : cleanup
+ free( found_count );
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ lfds610_freelist_delete( (ftps+loop)->fs_thread_local, NULL, NULL );
+ free( ftps );
+ lfds610_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int freelist_test_internal_popping_init( void **user_data, void *user_state )
+{
+ lfds610_atom_t
+ *count;
+
+ assert( user_data != NULL );
+ assert( user_state != NULL );
+
+ count = (lfds610_atom_t *) user_state;
+
+ *(lfds610_atom_t *) user_data = (*count)++;
+
+ return( 1 );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state )
+{
+ struct freelist_test_popping_state
+ *ftps;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ assert( freelist_test_popping_state != NULL );
+
+ ftps = (struct freelist_test_popping_state *) freelist_test_popping_state;
+
+ lfds610_freelist_use( ftps->fs );
+ lfds610_freelist_use( ftps->fs_thread_local );
+
+ while( lfds610_freelist_pop(ftps->fs, &fe) )
+ lfds610_freelist_push( ftps->fs_thread_local, fe );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ lfds610_atom_t
+ count = 0;
+
+ thread_state_t
+ *thread_handles;
+
+ enum lfds610_data_structure_validity
+ dvs;
+
+ struct freelist_test_pushing_state
+ *ftps;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ struct lfds610_freelist_state
+ *fs,
+ *cleanup_fs;
+
+ struct freelist_test_counter_and_thread_number
+ *cnt,
+ *counter_and_number_trackers;
+
+ struct lfds610_validation_info
+ vi;
+
+ /* TRD : we create an empty freelist, which we will push to
+
+ we then create one freelist per CPU, where this freelist
+ contains 100,000 elements per thread and
+ each element is an incrementing counter and unique ID
+ (from 0 to number of CPUs)
+
+ we then start one thread per CPU, where each thread is
+ given one of the populated freelists and pops from that
+ to push to the empty freelist
+
+ the reason for this is to achieve memory pre-allocation
+ which allows the pushing threads to run at maximum speed
+
+ the threads end when their freelists are empty
+
+ we then fully pop the now populated main freelist (onto
+ a second freelist, so we can cleanly free all memory),
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals 100,000 per thread
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
+
+ internal_display_test_name( "Pushing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ ftps = malloc( sizeof(struct freelist_test_pushing_state) * cpu_count );
+
+ lfds610_freelist_new( &fs, 0, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (ftps+loop)->thread_number = (lfds610_atom_t) loop;
+ // TRD : note count is shared across threads, so thread 0 is 0-100000, thread 1 is 100000-200000, etc
+ (ftps+loop)->count = &count;
+ lfds610_freelist_new( &(ftps+loop)->source_fs, 100000, freelist_test_internal_pushing_init, (void *) (ftps+loop) );
+ (ftps+loop)->fs = fs;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_pushing, ftps+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now fully pop and verify the main freelist
+ lfds610_freelist_new( &cleanup_fs, 0, NULL, NULL );
+
+ counter_and_number_trackers = malloc( sizeof(struct freelist_test_counter_and_thread_number) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (counter_and_number_trackers+loop)->counter = 100000 * loop;
+ (counter_and_number_trackers+loop)->thread_number = (lfds610_atom_t) loop;
+ }
+
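+ // the query validates the freelist against the expected element count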
+ vi.min_elements = vi.max_elements = 100000 * cpu_count;
+
+ lfds610_freelist_query( fs, LFDS610_FREELIST_QUERY_VALIDATE, &vi, (void *) &dvs );
+
+ while( dvs == LFDS610_VALIDITY_VALID and lfds610_freelist_pop(fs, &fe) )
+ {
+ lfds610_freelist_get_user_data_from_element( fe, (void **) &cnt );
+
+ if( cnt->counter != (counter_and_number_trackers+cnt->thread_number)->counter++ )
+ dvs = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ lfds610_freelist_push( cleanup_fs, fe );
+ }
+
+ // TRD : clean up
+ free( counter_and_number_trackers );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ lfds610_freelist_delete( (ftps+loop)->source_fs, NULL, NULL );
+
+ free( ftps );
+
+ lfds610_freelist_delete( cleanup_fs, freelist_test_internal_pushing_delete, NULL );
+ lfds610_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int freelist_test_internal_pushing_init( void **user_data, void *user_state )
+{
+ struct freelist_test_counter_and_thread_number
+ *ftcatn;
+
+ struct freelist_test_pushing_state
+ *ftps;
+
+ assert( user_data != NULL );
+ // TRD : user_state is being used as an integer type
+
+ *user_data = malloc( sizeof(struct freelist_test_counter_and_thread_number) );
+ ftps = (struct freelist_test_pushing_state *) user_state;
+
+ ftcatn = (struct freelist_test_counter_and_thread_number *) *user_data;
+
+ ftcatn->counter = (*ftps->count)++;
+ ftcatn->thread_number = ftps->thread_number;
+
+ return( 1 );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void freelist_test_internal_pushing_delete( void *user_data, void *user_state )
+{
+ assert( user_data != NULL );
+ assert( user_state == NULL );
+
+ free( user_data );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state )
+{
+ struct freelist_test_pushing_state
+ *ftps;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ assert( freelist_test_pushing_state != NULL );
+
+ ftps = (struct freelist_test_pushing_state *) freelist_test_pushing_state;
+
+ lfds610_freelist_use( ftps->source_fs );
+ lfds610_freelist_use( ftps->fs );
+
+ while( lfds610_freelist_pop(ftps->source_fs, &fe) )
+ lfds610_freelist_push( ftps->fs, fe );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_popping_and_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum lfds610_data_structure_validity
+ dvs;
+
+ struct lfds610_freelist_state
+ *fs;
+
+ struct freelist_test_popping_and_pushing_state
+ *pps;
+
+ struct lfds610_validation_info
+ vi;
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pops 100,000 elements then pushes them all back
+ the second thread pushes its 100,000 local elements then pops 1,000 back
+ all pushes and pops go onto the single main freelist
+
+ after time is up, all threads push what they have remaining onto
+ the main freelist
+
+ we then validate the main freelist
+ */
+
+ internal_display_test_name( "Popping and pushing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_freelist_new( &fs, 100000 * cpu_count, NULL, NULL );
+
+ pps = malloc( sizeof(struct freelist_test_popping_and_pushing_state) * cpu_count * 2 );
+
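+ // the first cpu_count states are for the popping threads (empty local freelists);
+ // the second cpu_count states are for the pushing threads (local freelists pre-filled with 100,000 elements)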
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (pps+loop)->fs = fs;
+ lfds610_freelist_new( &(pps+loop)->local_fs, 0, NULL, NULL );
+
+ (pps+loop+cpu_count)->fs = fs;
+ lfds610_freelist_new( &(pps+loop+cpu_count)->local_fs, 100000, NULL, NULL );
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping_and_pushing_start_popping, pps+loop );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, freelist_test_internal_thread_popping_and_pushing_start_pushing, pps+loop+cpu_count );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ lfds610_freelist_delete( (pps+loop)->local_fs, NULL, NULL );
+
+ free( pps );
+
+ vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;
+
+ lfds610_freelist_query( fs, LFDS610_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ lfds610_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state )
+{
+ struct freelist_test_popping_and_pushing_state
+ *pps;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ unsigned int
+ count;
+
+ assert( freelist_test_popping_and_pushing_state != NULL );
+
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;
+
+ lfds610_freelist_use( pps->fs );
+ lfds610_freelist_use( pps->local_fs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ count = 0;
+
+ while( count < 100000 )
+ {
+ lfds610_freelist_pop( pps->fs, &fe );
+
+ if( fe != NULL )
+ {
+ lfds610_freelist_push( pps->local_fs, fe );
+ count++;
+ }
+ }
+
+ while( lfds610_freelist_pop(pps->local_fs, &fe) )
+ lfds610_freelist_push( pps->fs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state )
+{
+ struct freelist_test_popping_and_pushing_state
+ *pps;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ unsigned int
+ count;
+
+ assert( freelist_test_popping_and_pushing_state != NULL );
+
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;
+
+ lfds610_freelist_use( pps->fs );
+ lfds610_freelist_use( pps->local_fs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ while( lfds610_freelist_pop(pps->local_fs, &fe) )
+ lfds610_freelist_push( pps->fs, fe );
+
+ count = 0;
+
+ while( count < 1000 )
+ {
+ lfds610_freelist_pop( pps->fs, &fe );
+
+ if( fe != NULL )
+ {
+ lfds610_freelist_push( pps->local_fs, fe );
+ count++;
+ }
+ }
+ }
+
+ // TRD : now push whatever we have in our local freelist
+ while( lfds610_freelist_pop(pps->local_fs, &fe) )
+ lfds610_freelist_push( pps->fs, fe );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void freelist_test_internal_rapid_popping_and_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_freelist_state
+ *fs;
+
+ struct lfds610_validation_info
+ vi;
+
+ enum lfds610_data_structure_validity
+ dvs;
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory clean-up we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/clean up that
+ they can tell the freelist is valid and don't leak memory
+ and this test is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but it goes very quickly
+
+ what we do is create a small freelist and then run one thread
+ per CPU, where each thread simply pops and then immediately
+ pushes
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the freelist, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ internal_display_test_name( "Rapid popping and pushing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_freelist_new( &fs, cpu_count, NULL, NULL );
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_rapid_popping_and_pushing, fs );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ vi.min_elements = cpu_count;
+ vi.max_elements = cpu_count;
+
+ lfds610_freelist_query( fs, LFDS610_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ lfds610_freelist_delete( fs, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds610_freelist_state )
+{
+ struct lfds610_freelist_state
+ *fs;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( lfds610_freelist_state != NULL );
+
+ fs = (struct lfds610_freelist_state *) lfds610_freelist_state;
+
+ lfds610_freelist_use( fs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_freelist_pop( fs, &fe );
+ lfds610_freelist_push( fs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds610_queue( void )
+{
+ printf( "\n"
+ "Queue Tests\n"
+ "===========\n" );
+
+ queue_test_enqueuing();
+ queue_test_dequeuing();
+ queue_test_enqueuing_and_dequeuing();
+ queue_test_rapid_enqueuing_and_dequeuing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_enqueuing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_queue_state
+ *qs;
+
+ struct queue_test_enqueuing_state
+ *qtes;
+
+ lfds610_atom_t
+ user_data,
+ thread,
+ count,
+ *per_thread_counters;
+
+ struct lfds610_validation_info
+ vi = { 1000000, 1000000 };
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ /* TRD : create an empty queue with 1,000,000 elements in its freelist
+ then run one thread per CPU
+ where each thread busy-works, enqueuing elements (until there are no more elements)
+ each element's void pointer of user data is (thread number | element number)
+ where element_number is a thread-local counter starting at 0
+ where the thread_number occupies the top byte
+
+ when we're done, we check that all the elements are present
+ and increment on a per-thread basis
+ */
+
+ internal_display_test_name( "Enqueuing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_queue_new( &qs, 1000000 );
+
+ qtes = malloc( sizeof(struct queue_test_enqueuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qtes+loop)->qs = qs;
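+ // seed the counter with the thread number in the top byte; the low bits form the per-thread element count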
+ (qtes+loop)->counter = (lfds610_atom_t) loop << (sizeof(lfds610_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_enqueuer, qtes+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ free( qtes );
+
+ /* TRD : first, validate the queue
+
+ then dequeue
+ we expect to find element numbers increment on a per thread basis
+ */
+
+ lfds610_queue_query( qs, LFDS610_QUEUE_QUERY_VALIDATE, &vi, dvs );
+
+ per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( dvs[0] == LFDS610_VALIDITY_VALID and dvs[1] == LFDS610_VALIDITY_VALID and lfds610_queue_dequeue(qs, (void *) &user_data) )
+ {
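+ // recover the thread number from the top byte and the per-thread element counter from the remaining bits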
+ thread = user_data >> (sizeof(lfds610_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( count < per_thread_counters[thread] )
+ dvs[0] = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count > per_thread_counters[thread] )
+ dvs[0] = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( count == per_thread_counters[thread] )
+ per_thread_counters[thread]++;
+ }
+
+ free( per_thread_counters );
+
+ lfds610_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state )
+{
+ struct queue_test_enqueuing_state
+ *qtes;
+
+ assert( queue_test_enqueuing_state != NULL );
+
+ qtes = (struct queue_test_enqueuing_state *) queue_test_enqueuing_state;
+
+ lfds610_queue_use( qtes->qs );
+
+ // TRD : top byte of counter is already our thread number
+ while( lfds610_queue_enqueue(qtes->qs, (void *) qtes->counter++) );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_dequeuing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_queue_state
+ *qs;
+
+ struct queue_test_dequeuing_state
+ *qtds;
+
+ struct lfds610_validation_info
+ vi = { 0, 0 };
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ /* TRD : create a queue with 1,000,000 elements
+
+ use a single thread to enqueue every element
+ each element's user data is an incrementing counter
+
+ then run one thread per CPU
+ where each thread busy-works dequeuing
+
+ when an element is dequeued, we check (on a per-thread basis) that
+ the value dequeued is greater than the value previously dequeued
+ */
+
+ internal_display_test_name( "Dequeuing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_queue_new( &qs, 1000000 );
+
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ lfds610_queue_enqueue( qs, (void *) (lfds610_atom_t) loop );
+
+ qtds = malloc( sizeof(struct queue_test_dequeuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qtds+loop)->qs = qs;
+ (qtds+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_dequeuer, qtds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : check queue is empty
+ lfds610_queue_query( qs, LFDS610_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (qtds+loop)->error_flag == RAISED )
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+
+ free( qtds );
+
+ lfds610_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state )
+{
+ struct queue_test_dequeuing_state
+ *qtds;
+
+ lfds610_atom_t
+ *prev_user_data,
+ *user_data;
+
+ assert( queue_test_dequeuing_state != NULL );
+
+ qtds = (struct queue_test_dequeuing_state *) queue_test_dequeuing_state;
+
+ lfds610_queue_use( qtds->qs );
+
+ // read an initial element to load prev_user_data; if the queue is already empty, there is nothing to check
+ if( !lfds610_queue_dequeue(qtds->qs, (void *) &prev_user_data) )
+ return( (thread_return_t) EXIT_SUCCESS );
+
+ while( lfds610_queue_dequeue(qtds->qs, (void *) &user_data) )
+ {
+ if( user_data <= prev_user_data )
+ qtds->error_flag = RAISED;
+
+ prev_user_data = user_data;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_enqueuing_and_dequeuing( void )
+{
+ unsigned int
+ loop,
+ subloop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_queue_state
+ *qs;
+
+ struct queue_test_enqueuing_and_dequeuing_state
+ *qteds;
+
+ struct lfds610_validation_info
+ vi = { 0, 0 };
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ internal_display_test_name( "Enqueuing and dequeuing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_queue_new( &qs, cpu_count );
+
+ qteds = malloc( sizeof(struct queue_test_enqueuing_and_dequeuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qteds+loop)->qs = qs;
+ (qteds+loop)->thread_number = loop;
+ (qteds+loop)->counter = (lfds610_atom_t) loop << (sizeof(lfds610_atom_t)*8-8);
+ (qteds+loop)->cpu_count = cpu_count;
+ (qteds+loop)->error_flag = LOWERED;
+ (qteds+loop)->per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )
+ *((qteds+loop)->per_thread_counters+subloop) = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_enqueuer_and_dequeuer, qteds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds610_queue_query( qs, LFDS610_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (qteds+loop)->error_flag == RAISED )
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ free( (qteds+loop)->per_thread_counters );
+
+ free( qteds );
+
+ lfds610_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_enqueuing_and_dequeuing_state )
+{
+ struct queue_test_enqueuing_and_dequeuing_state
+ *qteds;
+
+ time_t
+ start_time;
+
+ lfds610_atom_t
+ thread,
+ count,
+ user_data;
+
+ assert( queue_test_enqueuing_and_dequeuing_state != NULL );
+
+ qteds = (struct queue_test_enqueuing_and_dequeuing_state *) queue_test_enqueuing_and_dequeuing_state;
+
+ lfds610_queue_use( qteds->qs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_queue_enqueue( qteds->qs, (void *) (qteds->counter++) );
+ lfds610_queue_dequeue( qteds->qs, (void *) &user_data );
+
+ thread = user_data >> (sizeof(lfds610_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= qteds->cpu_count )
+ qteds->error_flag = RAISED;
+ else
+ {
+ if( count < qteds->per_thread_counters[thread] )
+ qteds->error_flag = RAISED;
+
+ if( count >= qteds->per_thread_counters[thread] )
+ qteds->per_thread_counters[thread] = count+1;
+ }
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void queue_test_rapid_enqueuing_and_dequeuing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_queue_state
+ *qs;
+
+ struct queue_test_rapid_enqueuing_and_dequeuing_state
+ *qtreds;
+
+ struct lfds610_validation_info
+ vi = { 50000, 50000 };
+
+ lfds610_atom_t
+ user_data,
+ thread,
+ count,
+ *per_thread_counters;
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ internal_display_test_name( "Rapid enqueuing and dequeuing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_queue_new( &qs, 100000 );
+
+ for( loop = 0 ; loop < 50000 ; loop++ )
+ lfds610_queue_enqueue( qs, NULL );
+
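+ // only 50,000 of the queue's 100,000 freelist elements are pre-enqueued; each worker enqueues
+ // then dequeues, so the queue should still hold 50,000 elements when validated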
+ qtreds = malloc( sizeof(struct queue_test_rapid_enqueuing_and_dequeuing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (qtreds+loop)->qs = qs;
+ (qtreds+loop)->counter = (lfds610_atom_t) loop << (sizeof(lfds610_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_rapid_enqueuer_and_dequeuer, qtreds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds610_queue_query( qs, LFDS610_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : now check results
+ per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( dvs[0] == LFDS610_VALIDITY_VALID and dvs[1] == LFDS610_VALIDITY_VALID and lfds610_queue_dequeue(qs, (void *) &user_data) )
+ {
+ thread = user_data >> (sizeof(lfds610_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( per_thread_counters[thread] == 0 )
+ per_thread_counters[thread] = count;
+
+ if( count < per_thread_counters[thread] )
+ dvs[0] = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count >= per_thread_counters[thread] )
+ per_thread_counters[thread] = count+1;
+ }
+
+ free( per_thread_counters );
+
+ free( qtreds );
+
+ lfds610_queue_delete( qs, NULL, NULL );
+
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state )
+{
+ struct queue_test_rapid_enqueuing_and_dequeuing_state
+ *qtreds;
+
+ time_t
+ start_time;
+
+ lfds610_atom_t
+ user_data;
+
+ assert( queue_test_rapid_enqueuing_and_dequeuing_state != NULL );
+
+ qtreds = (struct queue_test_rapid_enqueuing_and_dequeuing_state *) queue_test_rapid_enqueuing_and_dequeuing_state;
+
+ lfds610_queue_use( qtreds->qs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_queue_enqueue( qtreds->qs, (void *) (qtreds->counter++) );
+ lfds610_queue_dequeue( qtreds->qs, (void *) &user_data );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds610_ringbuffer( void )
+{
+ printf( "\n"
+ "Ringbuffer Tests\n"
+ "================\n" );
+
+ ringbuffer_test_reading();
+ ringbuffer_test_writing();
+ ringbuffer_test_reading_and_writing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void ringbuffer_test_reading( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ struct ringbuffer_test_reading_state
+ *rtrs;
+
+ struct lfds610_validation_info
+ vi = { 0, 0 };
+
+ enum lfds610_data_structure_validity
+ dvs[3];
+
+ lfds610_atom_t
+ total_read = 0;
+
+ /* TRD : we create a single ringbuffer
+ with 1,000,000 elements
+ we populate the ringbuffer, where the
+ user data is an incrementing counter
+
+ we create one thread per CPU
+ where each thread busy-works,
+ reading until the ringbuffer is empty
+
+ each thread keeps track of the number of reads it performs
+ and checks that each user data value it reads is greater than
+ the previous user data value that was read
+ */
+
+ internal_display_test_name( "Reading" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_ringbuffer_new( &rs, 1000000, NULL, NULL );
+
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ {
+ lfds610_ringbuffer_get_write_element( rs, &fe, NULL );
+ lfds610_freelist_set_user_data_in_element( fe, (void *) (lfds610_atom_t) loop );
+ lfds610_ringbuffer_put_write_element( rs, fe );
+ }
+
+ rtrs = malloc( sizeof(struct ringbuffer_test_reading_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rtrs+loop)->rs = rs;
+ (rtrs+loop)->read_count = 0;
+ (rtrs+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_reader, rtrs+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds610_ringbuffer_query( rs, LFDS610_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (rtrs+loop)->error_flag == RAISED )
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : check thread reads total to 1,000,000
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ total_read += (rtrs+loop)->read_count;
+
+ if( total_read < 1000000 )
+ dvs[0] = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( total_read > 1000000 )
+ dvs[0] = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ free( rtrs );
+
+ lfds610_ringbuffer_delete( rs, NULL, NULL );
+
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state )
+{
+ struct ringbuffer_test_reading_state
+ *rtrs;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ lfds610_atom_t
+ *prev_user_data,
+ *user_data;
+
+ assert( ringbuffer_test_reading_state != NULL );
+
+ rtrs = (struct ringbuffer_test_reading_state *) ringbuffer_test_reading_state;
+
+ lfds610_ringbuffer_use( rtrs->rs );
+
+ /* TRD : read an initial element to load a value into prev_user_data
+ it may be (under valgrind for example) that by the time we start
+ there are no elements remaining to read
+ */
+
+ lfds610_ringbuffer_get_read_element( rtrs->rs, &fe );
+ if( fe == NULL )
+ return( (thread_return_t) EXIT_SUCCESS );
+ lfds610_freelist_get_user_data_from_element( fe, (void **) &prev_user_data );
+ lfds610_ringbuffer_put_read_element( rtrs->rs, fe );
+
+ rtrs->read_count++;
+
+ while( lfds610_ringbuffer_get_read_element(rtrs->rs, &fe) )
+ {
+ lfds610_freelist_get_user_data_from_element( fe, (void **) &user_data );
+ lfds610_ringbuffer_put_read_element( rtrs->rs, fe );
+
+ if( user_data <= prev_user_data )
+ rtrs->error_flag = RAISED;
+
+ prev_user_data = user_data;
+
+ rtrs->read_count++;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void ringbuffer_test_writing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ struct ringbuffer_test_writing_state
+ *rtws;
+
+ struct lfds610_validation_info
+ vi = { 100000, 100000 };
+
+ enum lfds610_data_structure_validity
+ dvs[3];
+
+ lfds610_atom_t
+ thread,
+ count,
+ user_data,
+ *per_thread_counters;
+
+ /* TRD : we create a single ringbuffer
+ with 100000 elements
+ the ringbuffer starts empty
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ for ten seconds
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ after the threads are complete, we validate by
+ checking the user data counters increment on a per thread
+ basis
+ */
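+
+ /* NOTE : editorial illustration, not in the original source :
+ on a 64-bit build, sizeof(lfds610_atom_t)*8-8 == 56, so a write
+ by thread 3 with a counter value of 7 stores user data
+ (3 << 56) | 7 == 0x0300000000000007; the validation loop below
+ recovers thread = user_data >> 56 == 3 and
+ count = (user_data << 8) >> 8 == 7
+ */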
+
+ internal_display_test_name( "Writing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_ringbuffer_new( &rs, 100000, NULL, NULL );
+
+ rtws = malloc( sizeof(struct ringbuffer_test_writing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rtws+loop)->rs = rs;
+ (rtws+loop)->write_count = (lfds610_atom_t) loop << (sizeof(lfds610_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_writer, rtws+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now check results
+ per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ lfds610_ringbuffer_query( rs, LFDS610_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ while( dvs[0] == LFDS610_VALIDITY_VALID and dvs[1] == LFDS610_VALIDITY_VALID and dvs[2] == LFDS610_VALIDITY_VALID and lfds610_ringbuffer_get_read_element(rs, &fe) )
+ {
+ lfds610_freelist_get_user_data_from_element( fe, (void *) &user_data );
+
+ thread = user_data >> (sizeof(lfds610_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+ lfds610_ringbuffer_put_read_element( rs, fe );
+ break;
+ }
+
+ if( per_thread_counters[thread] == 0 )
+ per_thread_counters[thread] = count;
+
+ if( count < per_thread_counters[thread] )
+ dvs[0] = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count >= per_thread_counters[thread] )
+ per_thread_counters[thread] = count+1;
+
+ lfds610_ringbuffer_put_read_element( rs, fe );
+ }
+
+ free( per_thread_counters );
+
+ free( rtws );
+
+ lfds610_ringbuffer_delete( rs, NULL, NULL );
+
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state )
+{
+ struct ringbuffer_test_writing_state
+ *rtws;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ time_t
+ start_time;
+
+ assert( ringbuffer_test_writing_state != NULL );
+
+ rtws = (struct ringbuffer_test_writing_state *) ringbuffer_test_writing_state;
+
+ lfds610_ringbuffer_use( rtws->rs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_ringbuffer_get_write_element( rtws->rs, &fe, NULL );
+ lfds610_freelist_set_user_data_in_element( fe, (void *) (lfds610_atom_t) (rtws->write_count++) );
+ lfds610_ringbuffer_put_write_element( rtws->rs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void ringbuffer_test_reading_and_writing( void )
+{
+ unsigned int
+ loop,
+ subloop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_ringbuffer_state
+ *rs;
+
+ struct ringbuffer_test_reading_and_writing_state
+ *rtrws;
+
+ struct lfds610_validation_info
+ vi = { 0, 0 };
+
+ enum lfds610_data_structure_validity
+ dvs[3];
+
+ /* TRD : we create a single ringbuffer
+ with 100000 elements
+ the ringbuffer starts empty
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ and then immediately reading
+ for ten seconds
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ while a thread runs, it keeps track of the
+ counters for the other threads and throws an error
+ if it sees the number stay the same or decrease
+ */
+
+ internal_display_test_name( "Reading and writing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_ringbuffer_new( &rs, 100000, NULL, NULL );
+
+ rtrws = malloc( sizeof(struct ringbuffer_test_reading_and_writing_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (rtrws+loop)->rs = rs;
+ (rtrws+loop)->counter = (lfds610_atom_t) loop << (sizeof(lfds610_atom_t)*8-8);
+ (rtrws+loop)->cpu_count = cpu_count;
+ (rtrws+loop)->error_flag = LOWERED;
+ (rtrws+loop)->per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )
+ *((rtrws+loop)->per_thread_counters+subloop) = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_reader_writer, rtrws+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ lfds610_ringbuffer_query( rs, LFDS610_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ if( (rtrws+loop)->error_flag == RAISED )
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ free( (rtrws+loop)->per_thread_counters );
+
+ free( rtrws );
+
+ lfds610_ringbuffer_delete( rs, NULL, NULL );
+
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state )
+{
+ struct ringbuffer_test_reading_and_writing_state
+ *rtrws;
+
+ struct lfds610_freelist_element
+ *fe;
+
+ lfds610_atom_t
+ user_data,
+ thread,
+ count;
+
+ time_t
+ start_time;
+
+ assert( ringbuffer_test_reading_and_writing_state != NULL );
+
+ rtrws = (struct ringbuffer_test_reading_and_writing_state *) ringbuffer_test_reading_and_writing_state;
+
+ lfds610_ringbuffer_use( rtrws->rs );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_ringbuffer_get_write_element( rtrws->rs, &fe, NULL );
+ lfds610_freelist_set_user_data_in_element( fe, (void *) (lfds610_atom_t) (rtrws->counter++) );
+ lfds610_ringbuffer_put_write_element( rtrws->rs, fe );
+
+ lfds610_ringbuffer_get_read_element( rtrws->rs, &fe );
+ lfds610_freelist_get_user_data_from_element( fe, (void *) &user_data );
+
+ thread = user_data >> (sizeof(lfds610_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= rtrws->cpu_count )
+ rtrws->error_flag = RAISED;
+ else
+ {
+ if( count < rtrws->per_thread_counters[thread] )
+ rtrws->error_flag = RAISED;
+
+ if( count >= rtrws->per_thread_counters[thread] )
+ rtrws->per_thread_counters[thread] = count+1;
+ }
+
+ lfds610_ringbuffer_put_read_element( rtrws->rs, fe );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds610_slist( void )
+{
+ printf( "\n"
+ "SList Tests\n"
+ "===========\n" );
+
+ test_slist_new_delete_get();
+ test_slist_get_set_user_data();
+ test_slist_delete_all_elements();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void test_slist_new_delete_get( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ struct lfds610_slist_state
+ *ss;
+
+ struct lfds610_slist_element
+ *se = NULL;
+
+ struct slist_test_state
+ *sts;
+
+ thread_state_t
+ *thread_handles;
+
+ size_t
+ total_create_count = 0,
+ total_delete_count = 0,
+ element_count = 0;
+
+ enum lfds610_data_structure_validity
+ dvs = LFDS610_VALIDITY_VALID;
+
+ /* TRD : two threads per CPU
+ first simply alternates between new_head() and new_next() (next on element created by head)
+ second calls get_next (or get_head when it has no current element) and logically deletes the element
+ both threads keep count of created and deleted
+ validation is to reconcile created, deleted and remaining in the list
+ */
+
+ internal_display_test_name( "New head/next, delete and get next" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_slist_new( &ss, NULL, NULL );
+
+ sts = malloc( sizeof(struct slist_test_state) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ {
+ (sts+loop)->ss = ss;
+ (sts+loop)->create_count = 0;
+ (sts+loop)->delete_count = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, slist_test_internal_thread_new_delete_get_new_head_and_next, sts+loop );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, slist_test_internal_thread_new_delete_get_delete_and_get, sts+loop+cpu_count );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now validate
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ {
+ total_create_count += (sts+loop)->create_count;
+ total_delete_count += (sts+loop)->delete_count;
+ }
+
+ while( NULL != lfds610_slist_get_head_and_then_next(ss, &se) )
+ element_count++;
+
+ if( total_create_count - total_delete_count - element_count != 0 )
+ dvs = LFDS610_VALIDITY_INVALID_TEST_DATA;
+
+ free( sts );
+
+ lfds610_slist_delete( ss );
+
+ internal_display_test_result( 1, "slist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_new_head_and_next( void *slist_test_state )
+{
+ struct slist_test_state
+ *sts;
+
+ time_t
+ start_time;
+
+ struct lfds610_slist_element
+ *se = NULL;
+
+ assert( slist_test_state != NULL );
+
+ sts = (struct slist_test_state *) slist_test_state;
+
+ lfds610_slist_use( sts->ss );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 1 )
+ {
+ if( sts->create_count % 2 == 0 )
+ se = lfds610_slist_new_head( sts->ss, NULL );
+ else
+ lfds610_slist_new_next( se, NULL );
+
+ sts->create_count++;
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_delete_and_get( void *slist_test_state )
+{
+ struct slist_test_state
+ *sts;
+
+ time_t
+ start_time;
+
+ struct lfds610_slist_element
+ *se = NULL;
+
+ assert( slist_test_state != NULL );
+
+ sts = (struct slist_test_state *) slist_test_state;
+
+ lfds610_slist_use( sts->ss );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 1 )
+ {
+ if( se == NULL )
+ lfds610_slist_get_head( sts->ss, &se );
+ else
+ lfds610_slist_get_next( se, &se );
+
+ if( se != NULL )
+ {
+ if( 1 == lfds610_slist_logically_delete_element(sts->ss, se) )
+ sts->delete_count++;
+ }
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void test_slist_get_set_user_data( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ struct lfds610_slist_state
+ *ss;
+
+ struct lfds610_slist_element
+ *se = NULL;
+
+ struct slist_test_state
+ *sts;
+
+ thread_state_t
+ *thread_handles;
+
+ lfds610_atom_t
+ thread_and_count,
+ thread,
+ count,
+ *per_thread_counters,
+ *per_thread_drop_flags;
+
+ enum lfds610_data_structure_validity
+ dvs = LFDS610_VALIDITY_VALID;
+
+ /* TRD : create a list of (cpu_count*10) elements, user data 0
+ one thread per CPU
+ each thread loops, setting user_data to ((thread_number << (sizeof(lfds610_atom_t)*8-8)) | count)
+ validation is to scan the list; the count on a per thread basis should go down only once
+ */
+
+ internal_display_test_name( "Get and set user data" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_slist_new( &ss, NULL, NULL );
+
+ for( loop = 0 ; loop < cpu_count * 10 ; loop++ )
+ lfds610_slist_new_head( ss, NULL );
+
+ sts = malloc( sizeof(struct slist_test_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (sts+loop)->ss = ss;
+ (sts+loop)->thread_and_count = (lfds610_atom_t) loop << (sizeof(lfds610_atom_t)*8-8);
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, slist_test_internal_thread_get_set_user_data, sts+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // now validate
+ per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+ per_thread_drop_flags = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ *(per_thread_counters+loop) = 0;
+ *(per_thread_drop_flags+loop) = 0;
+ }
+
+ while( dvs == LFDS610_VALIDITY_VALID and NULL != lfds610_slist_get_head_and_then_next(ss, &se) )
+ {
+ lfds610_slist_get_user_data_from_element( se, (void **) &thread_and_count );
+
+ thread = thread_and_count >> (sizeof(lfds610_atom_t)*8-8);
+ count = (thread_and_count << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs = LFDS610_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( per_thread_counters[thread] == 0 )
+ {
+ per_thread_counters[thread] = count;
+ continue;
+ }
+
+ per_thread_counters[thread]++;
+
+ if( count < per_thread_counters[thread] and per_thread_drop_flags[thread] == 1 )
+ {
+ dvs = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ break;
+ }
+
+ if( count < per_thread_counters[thread] and per_thread_drop_flags[thread] == 0 )
+ {
+ per_thread_drop_flags[thread] = 1;
+ per_thread_counters[thread] = count;
+ continue;
+ }
+
+ if( count < per_thread_counters[thread] )
+ dvs = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count >= per_thread_counters[thread] )
+ per_thread_counters[thread] = count;
+ }
+
+ free( per_thread_drop_flags );
+ free( per_thread_counters );
+
+ free( sts );
+
+ lfds610_slist_delete( ss );
+
+ internal_display_test_result( 1, "slist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION slist_test_internal_thread_get_set_user_data( void *slist_test_state )
+{
+ struct slist_test_state
+ *sts;
+
+ time_t
+ start_time;
+
+ struct lfds610_slist_element
+ *se = NULL;
+
+ assert( slist_test_state != NULL );
+
+ sts = (struct slist_test_state *) slist_test_state;
+
+ lfds610_slist_use( sts->ss );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 1 )
+ {
+ if( se == NULL )
+ lfds610_slist_get_head( sts->ss, &se );
+
+ lfds610_slist_set_user_data_in_element( se, (void *) sts->thread_and_count++ );
+
+ lfds610_slist_get_next( se, &se );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void test_slist_delete_all_elements( void )
+{
+ struct lfds610_slist_state
+ *ss;
+
+ struct lfds610_slist_element
+ *se = NULL;
+
+ size_t
+ element_count = 0;
+
+ unsigned int
+ loop;
+
+ enum lfds610_data_structure_validity
+ dvs = LFDS610_VALIDITY_VALID;
+
+ /* TRD : this test creates a list of 1,000,000 elements
+ then simply calls delete_all_elements()
+ we then count the number of elements remaining
+ should be zero :-)
+ */
+
+ internal_display_test_name( "Delete all elements" );
+
+ lfds610_slist_new( &ss, NULL, NULL );
+
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ lfds610_slist_new_head( ss, NULL );
+
+ lfds610_slist_single_threaded_physically_delete_all_elements( ss );
+
+ while( NULL != lfds610_slist_get_head_and_then_next(ss, &se) )
+ element_count++;
+
+ if( element_count != 0 )
+ dvs = LFDS610_VALIDITY_INVALID_TEST_DATA;
+
+ lfds610_slist_delete( ss );
+
+ internal_display_test_result( 1, "slist", dvs );
+
+ return;
+}
+
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds610_stack( void )
+{
+ printf( "\n"
+ "Stack Tests\n"
+ "===========\n" );
+
+ stack_test_internal_popping();
+ stack_test_internal_pushing();
+ stack_test_internal_popping_and_pushing();
+ stack_test_internal_rapid_popping_and_pushing();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void stack_test_internal_popping( void )
+{
+ unsigned int
+ loop,
+ *found_count,
+ cpu_count;
+
+ lfds610_atom_t
+ count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum lfds610_data_structure_validity
+ dvs = LFDS610_VALIDITY_VALID;
+
+ struct lfds610_stack_state
+ *ss;
+
+ struct stack_test_popping_state
+ *stps;
+
+ /* TRD : we create a stack with 1,000,000 elements
+
+ we then populate the stack, where each element is
+ set to contain a void pointer which is its element number
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each popped element is pushed onto a thread-local stack
+
+ the threads run till the source stack is empty
+
+ we then check the thread-local stacks
+ we should find we have every element
+
+ then tidy up
+ */
+
+ internal_display_test_name( "Popping" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_stack_new( &ss, 1000000 );
+
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ lfds610_stack_push( ss, (void *) (lfds610_atom_t) loop );
+
+ stps = malloc( sizeof(struct stack_test_popping_state) * cpu_count );
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (stps+loop)->ss = ss;
+ lfds610_stack_new( &(stps+loop)->ss_thread_local, 1000000 );
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_popping, stps+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : now we check the thread-local stacks
+ found_count = malloc( sizeof(unsigned int) * 1000000 );
+ for( loop = 0 ; loop < 1000000 ; loop++ )
+ *(found_count+loop) = 0;
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ while( lfds610_stack_pop((stps+loop)->ss_thread_local, (void **) &count) )
+ (*(found_count+count))++;
+
+ for( loop = 0 ; loop < 1000000 and dvs == LFDS610_VALIDITY_VALID ; loop++ )
+ {
+ if( *(found_count+loop) == 0 )
+ dvs = LFDS610_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( *(found_count+loop) > 1 )
+ dvs = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ // TRD : cleanup
+ free( found_count );
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ lfds610_stack_delete( (stps+loop)->ss_thread_local, NULL, NULL );
+ free( stps );
+ lfds610_stack_delete( ss, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "stack", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping( void *stack_test_popping_state )
+{
+ struct stack_test_popping_state
+ *stps;
+
+ lfds610_atom_t
+ count;
+
+ assert( stack_test_popping_state != NULL );
+
+ stps = (struct stack_test_popping_state *) stack_test_popping_state;
+
+ lfds610_stack_use( stps->ss );
+
+ while( lfds610_stack_pop(stps->ss, (void **) &count) )
+ lfds610_stack_push( stps->ss_thread_local, (void *) count );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void stack_test_internal_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ struct stack_test_pushing_state
+ *stps;
+
+ struct lfds610_stack_state
+ *ss;
+
+ lfds610_atom_t
+ user_data,
+ thread,
+ count,
+ *per_thread_counters;
+
+ struct lfds610_validation_info
+ vi = { 1000000, 1000000 };
+
+ /* TRD : we create a stack with 1,000,000 elements
+
+ we then create one thread per CPU, where each thread
+ pushes as quickly as possible to the stack
+
+ the data pushed is a counter and a thread ID
+
+ the threads exit when the stack is full
+
+ we then validate the stack;
+
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals 1,000,000
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
+
+ internal_display_test_name( "Pushing" );
+
+ cpu_count = abstraction_cpu_count();
+
+ stps = malloc( sizeof(struct stack_test_pushing_state) * cpu_count );
+
+ // TRD : the main stack
+ lfds610_stack_new( &ss, 1000000 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (stps+loop)->thread_number = (lfds610_atom_t) loop;
+ (stps+loop)->ss = ss;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_pushing, stps+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : the stack is now fully pushed; time to verify
+ per_thread_counters = malloc( sizeof(lfds610_atom_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ *(per_thread_counters+loop) = 1000000;
+
+ lfds610_stack_query( ss, LFDS610_STACK_QUERY_VALIDATE, &vi, (void *) dvs );
+
+ while( dvs[0] == LFDS610_VALIDITY_VALID and lfds610_stack_pop(ss, (void **) &user_data) )
+ {
+ thread = user_data >> (sizeof(lfds610_atom_t)*8-8);
+ count = (user_data << 8) >> 8;
+
+ if( thread >= cpu_count )
+ {
+ dvs[0] = LFDS610_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( count > per_thread_counters[thread] )
+ dvs[0] = LFDS610_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( count < per_thread_counters[thread] )
+ per_thread_counters[thread] = count-1;
+ }
+
+ // TRD : clean up
+ free( per_thread_counters );
+
+ free( stps );
+
+ lfds610_stack_delete( ss, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 2, "stack", dvs[0], "stack freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_pushing( void *stack_test_pushing_state )
+{
+ struct stack_test_pushing_state
+ *stps;
+
+ lfds610_atom_t
+ counter = 0;
+
+ assert( stack_test_pushing_state != NULL );
+
+ stps = (struct stack_test_pushing_state *) stack_test_pushing_state;
+
+ lfds610_stack_use( stps->ss );
+
+ // TRD : we write (thread_number | counter), where thread_number is the top 8 bits of the lfds610_atom_t
+ while( lfds610_stack_push(stps->ss, (void *) ((stps->thread_number << (sizeof(lfds610_atom_t)*8-8)) | counter++)) );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void stack_test_internal_popping_and_pushing( void )
+{
+ unsigned int
+ loop,
+ subloop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ struct lfds610_stack_state
+ *ss;
+
+ struct stack_test_popping_and_pushing_state
+ *stpps;
+
+ struct lfds610_validation_info
+ vi;
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pushes 100000 elements then pops 100000 elements
+ the second thread pops 100000 elements then pushes 100000 elements
+ all pushes and pops go onto the single main stack
+
+ after time is up, all threads push what they have remaining onto
+ the main stack
+
+ we then validate the main stack
+ */
+
+ internal_display_test_name( "Popping and pushing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ // TRD : just some initial elements so the pushing threads can start immediately
+ lfds610_stack_new( &ss, 100000 * cpu_count * 2 );
+
+ for( loop = 0 ; loop < 100000 * cpu_count ; loop++ )
+ lfds610_stack_push( ss, (void *) (lfds610_atom_t) loop );
+
+ stpps = malloc( sizeof(struct stack_test_popping_and_pushing_state) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (stpps+loop)->ss = ss;
+ lfds610_stack_new( &(stpps+loop)->local_ss, 100000 );
+
+ (stpps+loop+cpu_count)->ss = ss;
+ lfds610_stack_new( &(stpps+loop+cpu_count)->local_ss, 100000 );
+
+ // TRD : fill the pushing thread stacks
+ for( subloop = 0 ; subloop < 100000 ; subloop++ )
+ lfds610_stack_push( (stpps+loop+cpu_count)->local_ss, (void *) (lfds610_atom_t) subloop );
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_popping_and_pushing_start_popping, stpps+loop );
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, stack_test_internal_thread_popping_and_pushing_start_pushing, stpps+loop+cpu_count );
+ }
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )
+ lfds610_stack_delete( (stpps+loop)->local_ss, NULL, NULL );
+
+ free( stpps );
+
+ vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;
+
+ lfds610_stack_query( ss, LFDS610_STACK_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ lfds610_stack_delete( ss, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 2, "stack", dvs[0], "stack freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_popping( void *stack_test_popping_and_pushing_state )
+{
+ struct stack_test_popping_and_pushing_state
+ *stpps;
+
+ void
+ *user_data;
+
+ time_t
+ start_time;
+
+ unsigned int
+ count;
+
+ assert( stack_test_popping_and_pushing_state != NULL );
+
+ stpps = (struct stack_test_popping_and_pushing_state *) stack_test_popping_and_pushing_state;
+
+ lfds610_stack_use( stpps->ss );
+ lfds610_stack_use( stpps->local_ss );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ count = 0;
+
+ while( count < 100000 )
+ if( lfds610_stack_pop(stpps->ss, &user_data) )
+ {
+ lfds610_stack_push( stpps->local_ss, user_data );
+ count++;
+ }
+
+ // TRD : return our local stack to the main stack
+ while( lfds610_stack_pop(stpps->local_ss, &user_data) )
+ lfds610_stack_push( stpps->ss, user_data );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_pushing( void *stack_test_popping_and_pushing_state )
+{
+ struct stack_test_popping_and_pushing_state
+ *stpps;
+
+ void
+ *user_data;
+
+ time_t
+ start_time;
+
+ unsigned int
+ count;
+
+ assert( stack_test_popping_and_pushing_state != NULL );
+
+ stpps = (struct stack_test_popping_and_pushing_state *) stack_test_popping_and_pushing_state;
+
+ lfds610_stack_use( stpps->ss );
+ lfds610_stack_use( stpps->local_ss );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ // TRD : return our local stack to the main stack
+ while( lfds610_stack_pop(stpps->local_ss, &user_data) )
+ lfds610_stack_push( stpps->ss, user_data );
+
+ count = 0;
+
+ while( count < 100000 )
+ if( lfds610_stack_pop(stpps->ss, &user_data) )
+ {
+ lfds610_stack_push( stpps->local_ss, user_data );
+ count++;
+ }
+ }
+
+ // TRD : now push whatever we have in our local stack
+ while( lfds610_stack_pop(stpps->local_ss, &user_data) )
+ lfds610_stack_push( stpps->ss, user_data );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void stack_test_internal_rapid_popping_and_pushing( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct lfds610_stack_state
+ *ss;
+
+ struct lfds610_validation_info
+ vi;
+
+ enum lfds610_data_structure_validity
+ dvs[2];
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory cleanup we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/cleanup that
+ they can tell the stack is valid and don't leak memory
+ this test is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but it goes very quickly
+
+ what we do is create a small stack and then run one thread
+ per CPU, where each thread simply pushes and then immediately
+ pops
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the stack, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ internal_display_test_name( "Rapid popping and pushing (10 seconds)" );
+
+ cpu_count = abstraction_cpu_count();
+
+ lfds610_stack_new( &ss, cpu_count );
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_rapid_popping_and_pushing, ss );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ vi.min_elements = 0;
+ vi.max_elements = 0;
+
+ lfds610_stack_query( ss, LFDS610_STACK_QUERY_VALIDATE, (void *) &vi, (void *) dvs );
+
+ lfds610_stack_delete( ss, NULL, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 2, "stack", dvs[0], "stack freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_rapid_popping_and_pushing( void *stack_state )
+{
+ struct lfds610_stack_state
+ *ss;
+
+ void
+ *user_data = NULL;
+
+ time_t
+ start_time;
+
+ assert( stack_state != NULL );
+
+ ss = (struct lfds610_stack_state *) stack_state;
+
+ lfds610_stack_use( ss );
+
+ time( &start_time );
+
+ while( time(NULL) < start_time + 10 )
+ {
+ lfds610_stack_push( ss, user_data );
+ lfds610_stack_pop( ss, &user_data );
+ }
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcproj", "{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05} = {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds610", "..\liblfds610\liblfds610.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Debug|Win32 = Debug|Win32\r
+ Debug|x64 = Debug|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ Release|Win32 = Release|Win32\r
+ Release|x64 = Release|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|x64.Build.0 = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|x64.Build.0 = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.Build.0 = Release|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="test"\r
+ ProjectGUID="{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ RootNamespace="test"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories="&quot;$(ProjectDir)\src&quot;;&quot;$(ProjectDir)\..\liblfds610\inc&quot;"\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="4"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories="&quot;$(ProjectDir)\src&quot;;&quot;$(ProjectDir)\..\liblfds610\inc&quot;"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories="&quot;$(ProjectDir)\src&quot;;&quot;$(ProjectDir)\..\liblfds610\inc&quot;"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories="&quot;$(ProjectDir)\src&quot;;&quot;$(ProjectDir)\..\liblfds610\inc&quot;"\r
+ ExceptionHandling="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="src"\r
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"\r
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"\r
+ >\r
+ <File\r
+ RelativePath=".\src\abstraction.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_cpu_count.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_start.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_wait.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_stack.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\main.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\misc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\structures.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_abstraction.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_slist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_stack.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+building liblfds\r
+================\r
+\r
+Windows (user-mode)\r
+===================\r
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition (or\r
+ later versions) to load "liblfds.sln". The "Win32" platform is x86,\r
+ the "x64" platform is x64.\r
+\r
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously\r
+ you'll need to have run setenv.bat or the appropriate vcvars*.bat first;\r
+ you can build for x64/64-bit and x86/32-bit - just run the correct batch\r
+ file).\r
+\r
+ Targets are "librel", "libdbg", "dllrel", "dlldbg" and "clean". You need\r
+ to clean between switching targets.\r
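+\r
+ For example (assuming GNU make is invoked as "make"), running\r
+ "make -f makefile.windows librel" builds the release static library;\r
+ run the "clean" target before building a different target.\r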
+\r
+Windows (kernel)\r
+================\r
+Use the Windows Driver Kit "build" command. Prior to running "build",\r
+if you wish to build a static library, run the batch file\r
+"runme_before_win_kernel_static_lib_build.bat"; if you wish to\r
+build a dynamic library, instead run "runme_before_win_kernel_dynamic_lib_build.bat".\r
+\r
+The Windows kernel build system is rather limited and, rather than\r
+messing up the directory/file structure just for the\r
+Windows kernel platform, I've instead arranged it so that these batch\r
+files do the necessary work so that "build" will work.\r
+\r
+The batch files are idempotent; you can run them as often as you\r
+like, in any order, at any time (before or after builds), and they'll\r
+do the right thing. You need to clean between switching targets.\r
+\r
+Linux\r
+=====\r
+Use GNUmake to run "makefile.linux". Targets are "arrel", "ardbg",\r
+"sorel", "sodbg" and "clean". You need to clean between switching\r
+targets.\r
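+\r
+For example, "make -f makefile.linux arrel" builds the release static\r
+library and "make -f makefile.linux sorel" the release shared library;\r
+run the "clean" target before switching to a different target.\r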
+\r
+\r
--- /dev/null
+DIRS = src
+
--- /dev/null
+#ifndef __LIBLFDS611_H\r
+\r
+ /***** library header *****/\r
+ #define LFDS611_RELEASE_NUMBER_STRING "6.1.1"\r
+\r
+\r
+\r
+\r
+ /***** lfds611_abstraction *****/\r
+\r
+ /***** defines *****/\r
+ #if (defined _WIN64 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+ // TRD : 64-bit Windows user-mode with the Microsoft C compiler, any CPU\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ #include <windows.h>\r
+ #include <intrin.h>\r
+ typedef unsigned __int64 lfds611_atom_t;\r
+ #define LFDS611_INLINE __forceinline\r
+ #define LFDS611_ALIGN(alignment) __declspec( align(alignment) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 8\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 16\r
+ #define LFDS611_BARRIER_COMPILER_LOAD _ReadBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_STORE _WriteBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_FULL _ReadWriteBarrier()\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD _mm_lfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE _mm_sfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL _mm_mfence()\r
+ #endif\r
+\r
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+ // TRD : 32-bit Windows user-mode with the Microsoft C compiler, any CPU\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ #include <windows.h>\r
+ #include <intrin.h>\r
+ typedef unsigned long int lfds611_atom_t;\r
+ #define LFDS611_INLINE __forceinline\r
+ #define LFDS611_ALIGN(alignment) __declspec( align(alignment) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 4\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 8\r
+ #define LFDS611_BARRIER_COMPILER_LOAD _ReadBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_STORE _WriteBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_FULL _ReadWriteBarrier()\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD _mm_lfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE _mm_sfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL _mm_mfence()\r
+\r
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0\r
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)\r
+ #endif\r
+\r
+ #if (defined _WIN64 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+ // TRD : 64-bit Windows kernel with the Microsoft C compiler, any CPU\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ #include <wdm.h>\r
+ typedef unsigned __int64 lfds611_atom_t;\r
+ #define LFDS611_INLINE __forceinline\r
+ #define LFDS611_ALIGN(alignment) __declspec( align(alignment) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 8\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 16\r
+ #define LFDS611_BARRIER_COMPILER_LOAD _ReadBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_STORE _WriteBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_FULL _ReadWriteBarrier()\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD _mm_lfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE _mm_sfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL _mm_mfence()\r
+ #endif\r
+\r
+ #if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+ // TRD : 32-bit Windows kernel with the Microsoft C compiler, any CPU\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ #include <wdm.h>\r
+ typedef unsigned long int lfds611_atom_t;\r
+ #define LFDS611_INLINE __forceinline\r
+ #define LFDS611_ALIGN(alignment) __declspec( align(alignment) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 4\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 8\r
+ #define LFDS611_BARRIER_COMPILER_LOAD _ReadBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_STORE _WriteBarrier()\r
+ #define LFDS611_BARRIER_COMPILER_FULL _ReadWriteBarrier()\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD _mm_lfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE _mm_sfence()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL _mm_mfence()\r
+\r
+ // TRD : this define is documented but missing in Microsoft Platform SDK v7.0\r
+ #define _InterlockedCompareExchangePointer(destination, exchange, compare) _InterlockedCompareExchange((volatile long *) destination, (long) exchange, (long) compare)\r
+ #endif\r
+\r
+ #if (defined __unix__ && defined __x86_64__ && __GNUC__)\r
+ // TRD : any UNIX with GCC on x64\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ typedef unsigned long long int lfds611_atom_t;\r
+ #define LFDS611_INLINE inline\r
+ #define LFDS611_ALIGN(alignment) __attribute__( (aligned(alignment)) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 8\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 16\r
+ #define LFDS611_BARRIER_COMPILER_LOAD __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_COMPILER_STORE __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_COMPILER_FULL __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD __sync_synchronize()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE __sync_synchronize()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL __sync_synchronize()\r
+ #endif\r
+\r
+ #if (defined __unix__ && defined __i686__ && __GNUC__)\r
+ // TRD : any UNIX with GCC on x86\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ typedef unsigned long int lfds611_atom_t;\r
+ #define LFDS611_INLINE inline\r
+ #define LFDS611_ALIGN(alignment) __attribute__( (aligned(alignment)) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 4\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 8\r
+ #define LFDS611_BARRIER_COMPILER_LOAD __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_COMPILER_STORE __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_COMPILER_FULL __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD __sync_synchronize()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE __sync_synchronize()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL __sync_synchronize()\r
+ #endif\r
+\r
+ #if (defined __unix__ && defined __arm__ && __GNUC__)\r
+ // TRD : any UNIX with GCC on ARM\r
+ #include <assert.h>\r
+ #include <stdio.h>\r
+ #include <stdlib.h>\r
+ typedef unsigned long int lfds611_atom_t;\r
+ #define LFDS611_INLINE inline\r
+ #define LFDS611_ALIGN(alignment) __attribute__( (aligned(alignment)) )\r
+ #define LFDS611_ALIGN_SINGLE_POINTER 4\r
+ #define LFDS611_ALIGN_DOUBLE_POINTER 8\r
+ #define LFDS611_BARRIER_COMPILER_LOAD __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_COMPILER_STORE __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_COMPILER_FULL __asm__ __volatile__ ( "" : : : "memory" )\r
+ #define LFDS611_BARRIER_PROCESSOR_LOAD __sync_synchronize()\r
+ #define LFDS611_BARRIER_PROCESSOR_STORE __sync_synchronize()\r
+ #define LFDS611_BARRIER_PROCESSOR_FULL __sync_synchronize()\r
+ #endif\r
+\r
+ #define LFDS611_BARRIER_LOAD LFDS611_BARRIER_COMPILER_LOAD; LFDS611_BARRIER_PROCESSOR_LOAD; LFDS611_BARRIER_COMPILER_LOAD\r
+ #define LFDS611_BARRIER_STORE LFDS611_BARRIER_COMPILER_STORE; LFDS611_BARRIER_PROCESSOR_STORE; LFDS611_BARRIER_COMPILER_STORE\r
+ #define LFDS611_BARRIER_FULL LFDS611_BARRIER_COMPILER_FULL; LFDS611_BARRIER_PROCESSOR_FULL; LFDS611_BARRIER_COMPILER_FULL\r
+\r
+ /***** enums *****/\r
+ enum lfds611_data_structure_validity\r
+ {\r
+ LFDS611_VALIDITY_VALID,\r
+ LFDS611_VALIDITY_INVALID_LOOP,\r
+ LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS,\r
+ LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS,\r
+ LFDS611_VALIDITY_INVALID_TEST_DATA\r
+ };\r
+\r
+ /***** structs *****/\r
+ struct lfds611_validation_info\r
+ {\r
+ lfds611_atom_t\r
+ min_elements,\r
+ max_elements;\r
+ };\r
+\r
+ /***** public prototypes *****/\r
+ void *lfds611_abstraction_malloc( size_t size );\r
+ void lfds611_abstraction_free( void *memory );\r
+\r
+\r
+\r
+\r
+\r
+ /***** lfds611_freelist *****/\r
+\r
+ /***** enums *****/\r
+ enum lfds611_freelist_query_type\r
+ {\r
+ LFDS611_FREELIST_QUERY_ELEMENT_COUNT,\r
+ LFDS611_FREELIST_QUERY_VALIDATE\r
+ };\r
+\r
+ /***** incomplete types *****/\r
+ struct lfds611_freelist_state;\r
+ struct lfds611_freelist_element;\r
+\r
+ /***** public prototypes *****/\r
+ int lfds611_freelist_new( struct lfds611_freelist_state **fs, lfds611_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );\r
+ void lfds611_freelist_use( struct lfds611_freelist_state *fs );\r
+ void lfds611_freelist_delete( struct lfds611_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+ lfds611_atom_t lfds611_freelist_new_elements( struct lfds611_freelist_state *fs, lfds611_atom_t number_elements );\r
+\r
+ struct lfds611_freelist_element *lfds611_freelist_pop( struct lfds611_freelist_state *fs, struct lfds611_freelist_element **fe );\r
+ struct lfds611_freelist_element *lfds611_freelist_guaranteed_pop( struct lfds611_freelist_state *fs, struct lfds611_freelist_element **fe );\r
+ void lfds611_freelist_push( struct lfds611_freelist_state *fs, struct lfds611_freelist_element *fe );\r
+\r
+ void *lfds611_freelist_get_user_data_from_element( struct lfds611_freelist_element *fe, void **user_data );\r
+ void lfds611_freelist_set_user_data_in_element( struct lfds611_freelist_element *fe, void *user_data );\r
+\r
+ void lfds611_freelist_query( struct lfds611_freelist_state *fs, enum lfds611_freelist_query_type query_type, void *query_input, void *query_output );\r
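+\r
+ /* Usage sketch : editorial addition, not part of the original header;\r
+ illustrative only, element count and user data values are arbitrary :\r
+\r
+ struct lfds611_freelist_state *fs;\r
+ struct lfds611_freelist_element *fe;\r
+ void *user_data;\r
+\r
+ lfds611_freelist_new( &fs, 10, NULL, NULL );\r
+\r
+ if( lfds611_freelist_pop(fs, &fe) != NULL )\r
+ {\r
+ lfds611_freelist_get_user_data_from_element( fe, &user_data );\r
+ lfds611_freelist_push( fs, fe );\r
+ }\r
+\r
+ lfds611_freelist_delete( fs, NULL, NULL );\r
+ */\r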
+\r
+\r
+\r
+\r
+\r
+ /***** lfds611_liblfds *****/\r
+\r
+ /***** public prototypes *****/\r
+ void lfds611_liblfds_abstraction_test_helper_increment_non_atomic( lfds611_atom_t *shared_counter );\r
+ void lfds611_liblfds_abstraction_test_helper_increment_atomic( volatile lfds611_atom_t *shared_counter );\r
+ void lfds611_liblfds_abstraction_test_helper_cas( volatile lfds611_atom_t *shared_counter, lfds611_atom_t *local_counter );\r
+ void lfds611_liblfds_abstraction_test_helper_dcas( volatile lfds611_atom_t *shared_counter, lfds611_atom_t *local_counter );\r
+\r
+\r
+\r
+\r
+\r
+ /***** lfds611_queue *****/\r
+\r
+ /***** enums *****/\r
+ enum lfds611_queue_query_type\r
+ {\r
+ LFDS611_QUEUE_QUERY_ELEMENT_COUNT,\r
+ LFDS611_QUEUE_QUERY_VALIDATE\r
+ };\r
+\r
+ /***** incomplete types *****/\r
+ struct lfds611_queue_state;\r
+\r
+ /***** public prototypes *****/\r
+ int lfds611_queue_new( struct lfds611_queue_state **sq, lfds611_atom_t number_elements );\r
+ void lfds611_queue_use( struct lfds611_queue_state *qs );\r
+ void lfds611_queue_delete( struct lfds611_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+ int lfds611_queue_enqueue( struct lfds611_queue_state *qs, void *user_data );\r
+ int lfds611_queue_guaranteed_enqueue( struct lfds611_queue_state *qs, void *user_data );\r
+ int lfds611_queue_dequeue( struct lfds611_queue_state *qs, void **user_data );\r
+\r
+ void lfds611_queue_query( struct lfds611_queue_state *qs, enum lfds611_queue_query_type query_type, void *query_input, void *query_output );\r
+\r
+\r
+\r
+\r
+\r
+ /***** lfds611_ringbuffer *****/\r
+\r
+ /***** enums *****/\r
+ enum lfds611_ringbuffer_query_type\r
+ {\r
+ LFDS611_RINGBUFFER_QUERY_VALIDATE\r
+ };\r
+\r
+ /***** incomplete types *****/\r
+ struct lfds611_ringbuffer_state;\r
+\r
+ /***** public prototypes *****/\r
+ int lfds611_ringbuffer_new( struct lfds611_ringbuffer_state **rs, lfds611_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state );\r
+ void lfds611_ringbuffer_use( struct lfds611_ringbuffer_state *rs );\r
+ void lfds611_ringbuffer_delete( struct lfds611_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+ struct lfds611_freelist_element *lfds611_ringbuffer_get_read_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element **fe );\r
+ struct lfds611_freelist_element *lfds611_ringbuffer_get_write_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element **fe, int *overwrite_flag );\r
+\r
+ void lfds611_ringbuffer_put_read_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element *fe );\r
+ void lfds611_ringbuffer_put_write_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element *fe );\r
+\r
+ void lfds611_ringbuffer_query( struct lfds611_ringbuffer_state *rs, enum lfds611_ringbuffer_query_type query_type, void *query_input, void *query_output );\r
+\r
+\r
+\r
+\r
+\r
+ /***** lfds611_slist *****/\r
+\r
+ /***** incomplete types *****/\r
+ struct lfds611_slist_state;\r
+ struct lfds611_slist_element;\r
+\r
+ /***** public prototypes *****/\r
+ int lfds611_slist_new( struct lfds611_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+ void lfds611_slist_use( struct lfds611_slist_state *ss );\r
+ void lfds611_slist_delete( struct lfds611_slist_state *ss );\r
+\r
+ struct lfds611_slist_element *lfds611_slist_new_head( struct lfds611_slist_state *ss, void *user_data );\r
+ struct lfds611_slist_element *lfds611_slist_new_next( struct lfds611_slist_element *se, void *user_data );\r
+\r
+ int lfds611_slist_logically_delete_element( struct lfds611_slist_state *ss, struct lfds611_slist_element *se );\r
+ void lfds611_slist_single_threaded_physically_delete_all_elements( struct lfds611_slist_state *ss );\r
+\r
+ int lfds611_slist_get_user_data_from_element( struct lfds611_slist_element *se, void **user_data );\r
+ int lfds611_slist_set_user_data_in_element( struct lfds611_slist_element *se, void *user_data );\r
+\r
+ struct lfds611_slist_element *lfds611_slist_get_head( struct lfds611_slist_state *ss, struct lfds611_slist_element **se );\r
+ struct lfds611_slist_element *lfds611_slist_get_next( struct lfds611_slist_element *se, struct lfds611_slist_element **next_se );\r
+ struct lfds611_slist_element *lfds611_slist_get_head_and_then_next( struct lfds611_slist_state *ss, struct lfds611_slist_element **se );\r
+\r
+\r
+\r
+\r
+\r
+ /***** lfds611_stack *****/\r
+\r
+ /***** enums *****/\r
+ enum lfds611_stack_query_type\r
+ {\r
+ LFDS611_STACK_QUERY_ELEMENT_COUNT,\r
+ LFDS611_STACK_QUERY_VALIDATE\r
+ };\r
+\r
+ /***** incomplete types *****/\r
+ struct lfds611_stack_state;\r
+\r
+ /***** public prototypes *****/\r
+ int lfds611_stack_new( struct lfds611_stack_state **ss, lfds611_atom_t number_elements );\r
+ void lfds611_stack_use( struct lfds611_stack_state *ss );\r
+ void lfds611_stack_delete( struct lfds611_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+ void lfds611_stack_clear( struct lfds611_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+ int lfds611_stack_push( struct lfds611_stack_state *ss, void *user_data );\r
+ int lfds611_stack_guaranteed_push( struct lfds611_stack_state *ss, void *user_data );\r
+ int lfds611_stack_pop( struct lfds611_stack_state *ss, void **user_data );\r
+\r
+ void lfds611_stack_query( struct lfds611_stack_state *ss, enum lfds611_stack_query_type query_type, void *query_input, void *query_output );\r
+\r
+\r
+\r
+\r
+\r
+ #define __LIBLFDS611_H\r
+\r
+#endif\r
+\r
--- /dev/null
+EXPORTS\r
+\r
+lfds611_liblfds_abstraction_test_helper_increment_non_atomic = lfds611_liblfds_abstraction_test_helper_increment_non_atomic @1\r
+lfds611_liblfds_abstraction_test_helper_increment_atomic = lfds611_liblfds_abstraction_test_helper_increment_atomic @2\r
+lfds611_liblfds_abstraction_test_helper_cas = lfds611_liblfds_abstraction_test_helper_cas @3\r
+lfds611_liblfds_abstraction_test_helper_dcas = lfds611_liblfds_abstraction_test_helper_dcas @4\r
+\r
+lfds611_freelist_delete = lfds611_freelist_delete @5\r
+lfds611_freelist_get_user_data_from_element = lfds611_freelist_get_user_data_from_element @6\r
+lfds611_freelist_guaranteed_pop = lfds611_freelist_guaranteed_pop @7\r
+lfds611_freelist_new = lfds611_freelist_new @8\r
+lfds611_freelist_new_elements = lfds611_freelist_new_elements @9\r
+lfds611_freelist_pop = lfds611_freelist_pop @10\r
+lfds611_freelist_push = lfds611_freelist_push @11\r
+lfds611_freelist_query = lfds611_freelist_query @12\r
+lfds611_freelist_set_user_data_in_element = lfds611_freelist_set_user_data_in_element @13\r
+lfds611_freelist_use = lfds611_freelist_use @14\r
+\r
+lfds611_queue_delete = lfds611_queue_delete @15\r
+lfds611_queue_dequeue = lfds611_queue_dequeue @16\r
+lfds611_queue_enqueue = lfds611_queue_enqueue @17\r
+lfds611_queue_guaranteed_enqueue = lfds611_queue_guaranteed_enqueue @18\r
+lfds611_queue_new = lfds611_queue_new @19\r
+lfds611_queue_query = lfds611_queue_query @20\r
+lfds611_queue_use = lfds611_queue_use @21\r
+\r
+lfds611_ringbuffer_delete = lfds611_ringbuffer_delete @22\r
+lfds611_ringbuffer_get_read_element = lfds611_ringbuffer_get_read_element @23\r
+lfds611_ringbuffer_get_write_element = lfds611_ringbuffer_get_write_element @24\r
+lfds611_ringbuffer_new = lfds611_ringbuffer_new @25\r
+lfds611_ringbuffer_put_read_element = lfds611_ringbuffer_put_read_element @26\r
+lfds611_ringbuffer_put_write_element = lfds611_ringbuffer_put_write_element @27\r
+lfds611_ringbuffer_query = lfds611_ringbuffer_query @28\r
+lfds611_ringbuffer_use = lfds611_ringbuffer_use @29\r
+\r
+lfds611_slist_delete = lfds611_slist_delete @30\r
+lfds611_slist_get_head = lfds611_slist_get_head @31\r
+lfds611_slist_get_head_and_then_next = lfds611_slist_get_head_and_then_next @32\r
+lfds611_slist_get_next = lfds611_slist_get_next @33\r
+lfds611_slist_get_user_data_from_element = lfds611_slist_get_user_data_from_element @34\r
+lfds611_slist_logically_delete_element = lfds611_slist_logically_delete_element @35\r
+lfds611_slist_new = lfds611_slist_new @36\r
+lfds611_slist_new_head = lfds611_slist_new_head @37\r
+lfds611_slist_new_next = lfds611_slist_new_next @38\r
+lfds611_slist_set_user_data_in_element = lfds611_slist_set_user_data_in_element @39\r
+lfds611_slist_single_threaded_physically_delete_all_elements = lfds611_slist_single_threaded_physically_delete_all_elements @40\r
+lfds611_slist_use = lfds611_slist_use @41\r
+\r
+lfds611_stack_clear = lfds611_stack_clear @42\r
+lfds611_stack_delete = lfds611_stack_delete @43\r
+lfds611_stack_guaranteed_push = lfds611_stack_guaranteed_push @44\r
+lfds611_stack_new = lfds611_stack_new @45\r
+lfds611_stack_pop = lfds611_stack_pop @46\r
+lfds611_stack_push = lfds611_stack_push @47\r
+lfds611_stack_query = lfds611_stack_query @48\r
+lfds611_stack_use = lfds611_stack_use @49\r
+\r
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds611", "liblfds611.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="liblfds611"\r
+ ProjectGUID="{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+ RootNamespace="liblfds"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release Lib|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="4"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLibrarianTool"\r
+ AdditionalOptions="/wx"\r
+ AdditionalLibraryDirectories=""\r
+ IgnoreAllDefaultLibraries="true"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds611.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ SmallerTypeCheck="true"\r
+ RuntimeLibrary="3"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds611.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds611.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release DLL|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="2"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="0"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ TargetEnvironment="3"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\inc""\r
+ StringPooling="true"\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="2"\r
+ BufferSecurityCheck="false"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="msvcrt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ ModuleDefinitionFile="$(ProjectDir)\liblfds611.def"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="inc"\r
+ Filter="h;hpp;hxx;hm;inl;inc;xsd"\r
+ UniqueIdentifier="{93995380-89BD-4b04-88EB-625FBE52EBFB}"\r
+ >\r
+ <File\r
+ RelativePath=".\inc\liblfds611.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="src"\r
+ >\r
+ <File\r
+ RelativePath=".\src\liblfds611_internal.h"\r
+ >\r
+ </File>\r
+ <Filter\r
+ Name="lfds611_abstraction"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_abstraction\lfds611_abstraction_free.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_abstraction\lfds611_abstraction_internal_body.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_abstraction\lfds611_abstraction_internal_wrapper.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_abstraction\lfds611_abstraction_malloc.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds611_freelist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_freelist\lfds611_freelist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_freelist\lfds611_freelist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_freelist\lfds611_freelist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_freelist\lfds611_freelist_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_freelist\lfds611_freelist_pop_push.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_freelist\lfds611_freelist_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds611_queue"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_queue\lfds611_queue_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_queue\lfds611_queue_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_queue\lfds611_queue_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_queue\lfds611_queue_query.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_queue\lfds611_queue_queue.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds611_ringbuffer"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_ringbuffer\lfds611_ringbuffer_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_ringbuffer\lfds611_ringbuffer_get_and_put.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_ringbuffer\lfds611_ringbuffer_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_ringbuffer\lfds611_ringbuffer_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_ringbuffer\lfds611_ringbuffer_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds611_slist"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_slist\lfds611_slist_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_slist\lfds611_slist_get_and_set.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_slist\lfds611_slist_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_slist\lfds611_slist_link.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_slist\lfds611_slist_new.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds611_stack"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_stack\lfds611_stack_delete.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_stack\lfds611_stack_internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_stack\lfds611_stack_new.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_stack\lfds611_stack_push_pop.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_stack\lfds611_stack_query.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ <Filter\r
+ Name="lfds611_liblfds"\r
+ >\r
+ <File\r
+ RelativePath=".\src\lfds611_liblfds\lfds611_liblfds_abstraction_test_helpers.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_liblfds\lfds611_liblfds_aligned_free.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_liblfds\lfds611_liblfds_aligned_malloc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\lfds611_liblfds\lfds611_liblfds_internal.h"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = inc\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>/dev/null 2>/dev/null\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = liblfds611\r
+AR_BINARY = $(BINDIR)/$(BINNAME).a\r
+SO_BINARY = $(BINDIR)/$(BINNAME).so\r
+SRCDIRS = lfds611_abstraction lfds611_freelist lfds611_liblfds lfds611_queue lfds611_ringbuffer lfds611_slist lfds611_stack\r
+# TRD : be aware - in the linux makefile, with the one-pass linking behaviour of the GNU linker, the order\r
+# of source files matters! it determines the ordering of objects in the library and, since the data\r
+# structures all use the freelist API and the abstraction API, that ordering has to be correct\r
+# TRD : lfds611_abstraction_cas.c lfds611_abstraction_dcas.c lfds611_abstraction_increment.c are inlined and are compiled by every C file\r
+SOURCES = lfds611_queue_delete.c lfds611_queue_new.c lfds611_queue_query.c lfds611_queue_queue.c \\r
+ lfds611_ringbuffer_delete.c lfds611_ringbuffer_get_and_put.c lfds611_ringbuffer_new.c lfds611_ringbuffer_query.c \\r
+ lfds611_slist_delete.c lfds611_slist_get_and_set.c lfds611_slist_link.c lfds611_slist_new.c \\r
+ lfds611_stack_delete.c lfds611_stack_new.c lfds611_stack_push_pop.c lfds611_stack_query.c \\r
+ lfds611_freelist_delete.c lfds611_freelist_get_and_set.c lfds611_freelist_new.c lfds611_freelist_query.c lfds611_freelist_pop_push.c \\r
+ lfds611_liblfds_abstraction_test_helpers.c lfds611_liblfds_aligned_free.c lfds611_liblfds_aligned_malloc.c \\r
+ lfds611_abstraction_free.c lfds611_abstraction_malloc.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))\r
+\r
+##### CPU variants #####\r
+UNAME = $(shell uname -m)\r
+GCCARCH = -march=$(UNAME)\r
+\r
+ifeq ($(UNAME),x86_64)\r
+ GCCARCH = -march=core2\r
+endif\r
+\r
+ifeq ($(findstring arm,$(UNAME)),arm)\r
+ GCCARCH = -march=armv6k -marm\r
+endif\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+DG = gcc\r
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)"\r
+\r
+CC = gcc\r
+CBASE = -Wall -Wno-unknown-pragmas -std=c99 $(GCCARCH) -pthread -c -I"$(SRCDIR)" -I"$(INCDIR)"\r
+CFREL = -O2 -finline-functions -Wno-strict-aliasing\r
+CFDBG = -O0 -g\r
+\r
+AR = ar\r
+AFLAGS = -rcs\r
+\r
+LD = gcc\r
+LFBASE = -Wall -std=c99 -shared\r
+LFREL = -O2 -s\r
+LFDBG = -O0 -g\r
+\r
+##### rel/dbg .a/.so variants #####\r
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)\r
+ CBASE := $(CBASE) -fpic\r
+endif\r
+\r
+CFLAGS = $(CBASE) $(CFDBG)\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+\r
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)\r
+ CFLAGS = $(CBASE) $(CFREL)\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.o : %.c\r
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d\r
+ $(CC) $(CFLAGS) -o $@ $<\r
+\r
+##### explicit rules #####\r
+$(AR_BINARY) : $(OBJECTS)\r
+ $(AR) $(AFLAGS) $(AR_BINARY) $(OBJECTS)\r
+\r
+$(SO_BINARY) : $(OBJECTS)\r
+ $(LD) $(LFLAGS) $(SYSLIBS) $(OBJECTS) -o $(SO_BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean arrel ardbg sorel sodbg\r
+\r
+clean : \r
+ @rm -f $(BINDIR)/$(BINNAME).* $(OBJDIR)/*.o $(OBJDIR)/*.d\r
+\r
+arrel : $(AR_BINARY)\r
+ardbg : $(AR_BINARY)\r
+\r
+sorel : $(SO_BINARY)\r
+sodbg : $(SO_BINARY)\r
+\r
+##### dependencies #####\r
+-include $(DEPENDS)\r
+\r
+##### notes #####\r
+# TRD : we use -std=c99 purely to permit C++ style comments\r
+\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = inc\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>nul 2>nul\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = liblfds611\r
+LIB_BINARY = $(BINDIR)\$(BINNAME).lib\r
+DLL_BINARY = $(BINDIR)\$(BINNAME).dll\r
+SRCDIRS = lfds611_abstraction lfds611_freelist lfds611_liblfds lfds611_queue lfds611_ringbuffer lfds611_slist lfds611_stack\r
+# TRD : lfds611_abstraction_cas.c lfds611_abstraction_dcas.c lfds611_abstraction_increment.c are inlined and are compiled by every C file\r
+SOURCES = lfds611_abstraction_free.c lfds611_abstraction_malloc.c \\r
+ lfds611_freelist_delete.c lfds611_freelist_get_and_set.c lfds611_freelist_new.c lfds611_freelist_query.c lfds611_freelist_pop_push.c \\r
+ lfds611_liblfds_abstraction_test_helpers.c lfds611_liblfds_aligned_free.c lfds611_liblfds_aligned_malloc.c \\r
+ lfds611_queue_delete.c lfds611_queue_new.c lfds611_queue_query.c lfds611_queue_queue.c \\r
+ lfds611_ringbuffer_delete.c lfds611_ringbuffer_get_and_put.c lfds611_ringbuffer_new.c lfds611_ringbuffer_query.c \\r
+ lfds611_slist_delete.c lfds611_slist_get_and_set.c lfds611_slist_link.c lfds611_slist_new.c \\r
+ lfds611_stack_delete.c lfds611_stack_new.c lfds611_stack_push_pop.c lfds611_stack_query.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))\r
+SYSLIBS = kernel32.lib\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+CC = cl\r
+CBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /DUNICODE /D_UNICODE /DWIN32_LEAN_AND_MEAN\r
+CFREL = /Ox /DNDEBUG\r
+CFDBG = /Od /Gm /Zi /D_DEBUG\r
+\r
+AR = lib\r
+AFLAGS = /nologo /subsystem:console /wx /verbose\r
+\r
+LD = link\r
+LFBASE = /dll /def:$(BINNAME).def /nologo /subsystem:console /wx /nodefaultlib /nxcompat\r
+LFREL = /incremental:no\r
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"\r
+\r
+##### variants #####\r
+CFLAGS = $(CBASE) $(CFDBG) /MTd\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+CLIB = libcmtd.lib\r
+\r
+ifeq ($(MAKECMDGOALS),librel)\r
+ CFLAGS = $(CBASE) $(CFREL) /MT\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+ CLIB = libcmt.lib\r
+endif\r
+\r
+ifeq ($(MAKECMDGOALS),libdbg)\r
+ CFLAGS = $(CBASE) $(CFDBG) /MTd\r
+ LFLAGS = $(LFBASE) $(LFDBG)\r
+ CLIB = libcmtd.lib\r
+endif\r
+\r
+ifeq ($(MAKECMDGOALS),dllrel)\r
+ CFLAGS = $(CBASE) $(CFREL) /MD\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+ CLIB = msvcrt.lib\r
+endif\r
+\r
+ifeq ($(MAKECMDGOALS),dlldbg)\r
+ CFLAGS = $(CBASE) $(CFDBG) /MDd\r
+ LFLAGS = $(LFBASE) $(LFDBG)\r
+ CLIB = msvcrtd.lib\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.obj : %.c\r
+ $(CC) $(CFLAGS) "/Fo$@" $<\r
+\r
+##### explicit rules #####\r
+$(LIB_BINARY) : $(OBJECTS)\r
+ $(AR) $(AFLAGS) $(OBJECTS) /out:$(LIB_BINARY)\r
+\r
+$(DLL_BINARY) : $(OBJECTS)\r
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(OBJECTS) /out:$(DLL_BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean librel libdbg dllrel dlldbg\r
+\r
+clean : \r
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)\r
+\r
+librel : $(LIB_BINARY)\r
+libdbg : $(LIB_BINARY)\r
+\r
+dllrel : $(DLL_BINARY)\r
+dlldbg : $(DLL_BINARY)\r
+\r
--- /dev/null
+introduction\r
+============\r
+Welcome to liblfds, a portable, license-free, lock-free data structure library\r
+written in C.\r
+\r
+supported platforms\r
+===================\r
+Out-of-the-box ports are provided for:\r
+\r
+Operating System CPU Toolchain Choices\r
+================ ============= =================\r
+Windows 64-bit x64 1. Microsoft Visual Studio\r
+ 2. Microsoft Windows SDK and GNUmake\r
+\r
+Windows 32-bit x64, x86 1. Microsoft Visual Studio\r
+ 2. Visual C++ Express Edition\r
+ 3. Microsoft Windows SDK and GNUmake\r
+\r
+Windows Kernel x64, x86 1. Windows Driver Kit\r
+\r
+Linux 64-bit x64 1. GCC and GNUmake\r
+\r
+Linux 32-bit ARM, x64, x86 1. GCC and GNUmake\r
+\r
+For more information, including version requirements, see the building guide (lfds).\r
+\r
+data structures\r
+===============\r
+This release of liblfds provides the following:\r
+\r
+ * Freelist\r
+ * Queue\r
+ * Ringbuffer (each element read by a single reader)\r
+ * Singly-linked list (logical delete only)\r
+ * Stack\r
+\r
+These are all many-readers, many-writers.\r
+\r
+liblfds on-line\r
+===============\r
+On the liblfds home page, you will find the blog, a bugzilla, a forum, a\r
+mediawiki and the current and all historical releases.\r
+\r
+The mediawiki contains comprehensive documentation for development, building,\r
+testing and porting.\r
+\r
+http://www.liblfds.org\r
+\r
+license\r
+=======\r
+There is no license. You are free to use this code in any way.\r
+\r
+using\r
+=====\r
+Once built, there is a single header file, /inc/liblfds611.h, which you must\r
+include in your source code, and a single library file, /bin/liblfds611.*,\r
+where the suffix depends on your platform and your build choice (static or\r
+dynamic). If built statically, link against the library directly; if built\r
+dynamically, arrange your system so that the loader can find the library at\r
+run-time.\r
+\r
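+As an illustration only, here is a minimal single-threaded sketch using the\r
+stack API declared in liblfds611.h; it assumes a user-mode build, linked as\r
+described above (in a multi-threaded program, each thread other than the\r
+creating thread would typically call lfds611_stack_use before accessing the\r
+stack):\r
+\r
+  #include <stdio.h>\r
+  #include "liblfds611.h"\r
+\r
+  int main( void )\r
+  {\r
+    struct lfds611_stack_state\r
+      *ss;\r
+\r
+    void\r
+      *user_data;\r
+\r
+    int\r
+      x = 1;\r
+\r
+    // create a stack with room for 10 elements, push one value, pop it back\r
+    if( lfds611_stack_new(&ss, 10) )\r
+    {\r
+      lfds611_stack_push( ss, &x );\r
+\r
+      if( lfds611_stack_pop(ss, &user_data) )\r
+        printf( "popped %d\n", *(int *) user_data );\r
+\r
+      lfds611_stack_delete( ss, NULL, NULL );\r
+    }\r
+\r
+    return( 0 );\r
+  }\r
+\r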
+testing\r
+=======\r
+The library comes with a command line test and benchmark program. This program\r
+requires threads. As such, it is only suitable for platforms which can execute\r
+a command line binary and provide thread support. Currently this means the test\r
+and benchmark program works for all platforms except the Windows Kernel.\r
+\r
+For documentation, see the testing and benchmarking guide in the mediawiki.\r
+\r
+porting\r
+=======\r
+Both the test program and liblfds provide an abstraction layer which masks\r
+platform differences. Porting is the act of implementing, for your platform,\r
+the functions which make up these abstraction layers. You do not need to port\r
+the test program in order to port liblfds, but doing so is recommended, so\r
+that you can test your port.\r
+\r
+To support liblfds, your platform MUST support:\r
+\r
+ * atomic single-word* increment\r
+ * atomic single-word compare-and-swap\r
+ * atomic contiguous double-word compare-and-swap**\r
+ * malloc and free\r
+ * compiler directive for alignment of variables declared on the stack\r
+ * compiler directives for compiler barriers and processor barriers\r
+\r
+*  A "word" here means a type equal in length to the platform pointer size.\r
+** This requirement excludes the Alpha, IA64, MIPS, PowerPC and SPARC platforms.\r
+\r
+Also, your platform MAY support:\r
+\r
+ * compiler keyword for function inlining\r
+\r
+To support the test program, your platform MUST support:\r
+\r
+ * determining the number of logical cores\r
+ * threads (starting and waiting on them for completion)\r
+\r
+For documentation, see the porting guide (lfds) in the mediawiki.\r
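+\r
+As a rough sketch, and taking the existing 32-bit GCC port as a model, the\r
+platform-specific section a port adds to liblfds611.h supplies an atom type\r
+(an unsigned integer the width of a pointer), definitions for LFDS611_INLINE\r
+and LFDS611_ALIGN appropriate to the compiler, and the alignment and barrier\r
+macros, along these lines:\r
+\r
+  typedef unsigned long int lfds611_atom_t;\r
+  #define LFDS611_ALIGN_SINGLE_POINTER    4\r
+  #define LFDS611_ALIGN_DOUBLE_POINTER    8\r
+  #define LFDS611_BARRIER_COMPILER_LOAD   __asm__ __volatile__ ( "" : : : "memory" )\r
+  #define LFDS611_BARRIER_COMPILER_STORE  __asm__ __volatile__ ( "" : : : "memory" )\r
+  #define LFDS611_BARRIER_COMPILER_FULL   __asm__ __volatile__ ( "" : : : "memory" )\r
+  #define LFDS611_BARRIER_PROCESSOR_LOAD  __sync_synchronize()\r
+  #define LFDS611_BARRIER_PROCESSOR_STORE __sync_synchronize()\r
+  #define LFDS611_BARRIER_PROCESSOR_FULL  __sync_synchronize()\r
+\r
+The abstraction layer itself then provides implementations with these\r
+signatures (the atomic operations are static inline functions and, as noted\r
+in the makefiles, are compiled into every C file; the memory functions are\r
+ordinary C functions):\r
+\r
+  void *lfds611_abstraction_malloc( size_t size );\r
+  void lfds611_abstraction_free( void *memory );\r
+  static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_cas( volatile lfds611_atom_t *destination, lfds611_atom_t exchange, lfds611_atom_t compare );\r
+  static LFDS611_INLINE unsigned char lfds611_abstraction_dcas( volatile lfds611_atom_t *destination, lfds611_atom_t *exchange, lfds611_atom_t *compare );\r
+  static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_increment( volatile lfds611_atom_t *value );\r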
+\r
+release history\r
+===============\r
+release 1, 25th September 2009, svn revision 1574.\r
+ - initial release\r
+\r
+release 2, 5th October 2009, svn revision 1599.\r
+ - added abstraction layer for Windows kernel\r
+ - minor code tidyups/fixes\r
+\r
+release 3, 25th October 2009, svn revision 1652.\r
+ - added singly linked list (logical delete only)\r
+ - minor code tidyups/fixes\r
+\r
+release 4, 7th December 2009, svn revision 1716.\r
+ - added ARM support\r
+ - added benchmarking functionality to the test program\r
+ - fixed a profound and pervasive pointer\r
+ declaration bug; earlier releases of liblfds\r
+ *should not be used*\r
+\r
+release 5, 19th December 2009, svn revision 1738.\r
+ - fixed subtle queue bug, which also affected ringbuffer\r
+ and caused data re-ordering under high load\r
+ - added benchmarks for freelist, ringbuffer and stack\r
+\r
+release 6, 29th December 2009, svn revision 1746.\r
+ - fixed two implementation errors, which reduced performance,\r
+ spotted by Codeplug from "http://cboard.cprogramming.com".\r
+\r
+release 6.0.0, 18th December 2012, svn revision 2537\r
+ - introduction of namespaces, e.g. the "lfds600_" prefix\r
+ code otherwise COMPLETELY AND WHOLLY UNCHANGED\r
+ this release is a stepping-stone to 6.1.0\r
+\r
+release 6.0.1, 2nd January 2013, svn revision 3296\r
+ - bug fix where an enum wasn't moved into the new namespacing policy\r
+\r
+release 6.1.0, 31st December 2012, svn revision 2600\r
+ - fixed all existing non-enhancement bugs\r
+ - discovered some new bugs and fixed them too\r
+ - a very few minor changes/enhancements\r
+\r
+release 6.1.1, 2nd January 2013, svn revision 3297\r
+ - crucial bug fix where compiler barriers for atomic operations\r
+ were not brought over from 7.0.0 during backporting\r
+ - minor fix for abstraction tests, two missing store barriers\r
+\r
--- /dev/null
+The Windows kernel build environment is primitive and has a number\r
+of severe limitations; in particular, all source files must be in\r
+one directory and it is not possible to choose the output binary type\r
+(static or dynamic library) from the build command line; rather,\r
+a string has to be modified in a text file used by the build (!).\r
+\r
+To deal with these limitations, it is necessary for a Windows kernel\r
+build to run a batch file prior to building.\r
+\r
+There are two batch files, one for static library builds and the other\r
+for dynamic library builds.\r
+\r
+They are both idempotent; you can run them as often as you like and\r
+switch between them as often as you want. Whichever one you run, it\r
+takes the source tree from whatever state it was previously in into\r
+the state required for that build type.\r
+\r
+Both batch files copy all the source files into a single directory,\r
+"/src/single_dir_for_windows_kernel/".\r
+\r
+The static library batch file will then copy "/sources.static" into\r
+"/src/single_dir_for_windows_kernel/", which will cause a static\r
+library to be built.\r
+\r
+The dynamic library batch file will then copy "/sources.dynamic" into\r
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic\r
+library to be built. It will also copy "src/driver_entry.c" into\r
+"/src/single_dir_for_windows_kernel/", since the linker requires\r
+the DriverEntry function to exist for dynamic libraries, even\r
+though it's not used.\r
+\r
+\r
--- /dev/null
+@echo off\r
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul\r
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_liblfds\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y sources.dynamic src\single_dir_for_windows_kernel\sources 1>nul 2>nul\r
+copy /y src\driver_entry.c src\single_dir_for_windows_kernel 1>nul 2>nul\r
+echo Windows kernel dynamic library build directory structure created.\r
+echo (Note the effects of this batch file are idempotent).\r
+\r
--- /dev/null
+@echo off\r
+rmdir /q /s src\single_dir_for_windows_kernel 1>nul 2>nul\r
+mkdir src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_abstraction\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_freelist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_liblfds\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_queue\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_ringbuffer\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_slist\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y src\lfds611_stack\* src\single_dir_for_windows_kernel 1>nul 2>nul\r
+copy /y sources.static src\single_dir_for_windows_kernel\sources 1>nul 2>nul\r
+erase /f src\single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul\r
+echo Windows kernel static library build directory structure created.\r
+echo (Note the effects of this batch file are idempotent).\r
+\r
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4\r
+DLLDEF = ../../liblfds611.def\r
+TARGETNAME = liblfds611\r
+TARGETPATH = ../../bin/\r
+TARGETTYPE = EXPORT_DRIVER\r
+UMTYPE = nt\r
+USER_C_FLAGS = /DWIN_KERNEL_BUILD\r
+\r
+INCLUDES = ..;../../inc/\r
+SOURCES = lfds611_abstraction_free.c \\r
+ lfds611_abstraction_malloc.c \\r
+ lfds611_freelist_delete.c \\r
+ lfds611_freelist_get_and_set.c \\r
+ lfds611_freelist_new.c \\r
+ lfds611_freelist_pop_push.c \\r
+ lfds611_freelist_query.c \\r
+ lfds611_liblfds_abstraction_test_helpers.c \\r
+ lfds611_liblfds_aligned_free.c \\r
+ lfds611_liblfds_aligned_malloc.c \\r
+ lfds611_queue_delete.c \\r
+ lfds611_queue_new.c \\r
+ lfds611_queue_query.c \\r
+ lfds611_queue_queue.c \\r
+ lfds611_ringbuffer_delete.c \\r
+ lfds611_ringbuffer_get_and_put.c \\r
+ lfds611_ringbuffer_new.c \\r
+ lfds611_ringbuffer_query.c \\r
+ lfds611_slist_delete.c \\r
+ lfds611_slist_get_and_set.c \\r
+ lfds611_slist_link.c \\r
+ lfds611_slist_new.c \\r
+ lfds611_stack_delete.c \\r
+ lfds611_stack_new.c \\r
+ lfds611_stack_push_pop.c \\r
+ lfds611_stack_query.c \\r
+ driver_entry.c\r
+\r
--- /dev/null
+MSC_WARNING_LEVEL = /WX /W4\r
+TARGETNAME = liblfds611\r
+TARGETPATH = ../../bin/\r
+TARGETTYPE = DRIVER_LIBRARY\r
+UMTYPE = nt\r
+USER_C_FLAGS = /DWIN_KERNEL_BUILD\r
+\r
+INCLUDES = ..;../../inc/\r
+SOURCES = lfds611_abstraction_free.c \\r
+ lfds611_abstraction_malloc.c \\r
+ lfds611_freelist_delete.c \\r
+ lfds611_freelist_get_and_set.c \\r
+ lfds611_freelist_new.c \\r
+ lfds611_freelist_pop_push.c \\r
+ lfds611_freelist_query.c \\r
+ lfds611_liblfds_abstraction_test_helpers.c \\r
+ lfds611_liblfds_aligned_free.c \\r
+ lfds611_liblfds_aligned_malloc.c \\r
+ lfds611_queue_delete.c \\r
+ lfds611_queue_new.c \\r
+ lfds611_queue_query.c \\r
+ lfds611_queue_queue.c \\r
+ lfds611_ringbuffer_delete.c \\r
+ lfds611_ringbuffer_get_and_put.c \\r
+ lfds611_ringbuffer_new.c \\r
+ lfds611_ringbuffer_query.c \\r
+ lfds611_slist_delete.c \\r
+ lfds611_slist_get_and_set.c \\r
+ lfds611_slist_link.c \\r
+ lfds611_slist_new.c \\r
+ lfds611_stack_delete.c \\r
+ lfds611_stack_new.c \\r
+ lfds611_stack_push_pop.c \\r
+ lfds611_stack_query.c\r
+\r
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "liblfds611_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )\r
+{\r
+ return( STATUS_SUCCESS );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+This C file (driver_entry.c) is used when building a dynamic library for\r
+the Windows kernel. It exists to work around one of the limitations of\r
+that build environment. It is not used by any other build; just ignore it.\r
+\r
--- /dev/null
+#include "lfds611_abstraction_internal_body.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 64 bit and 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_cas( volatile lfds611_atom_t *destination, lfds611_atom_t exchange, lfds611_atom_t compare )
+ {
+ lfds611_atom_t
+ rv;
+
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ rv = (lfds611_atom_t) _InterlockedCompareExchangePointer( (void * volatile *) destination, (void *) exchange, (void *) compare );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on any CPU with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_cas( volatile lfds611_atom_t *destination, lfds611_atom_t exchange, lfds611_atom_t compare )
+ {
+ lfds611_atom_t
+ rv;
+
+ assert( destination != NULL );
+ // TRD : exchange can be any value in its range
+ // TRD : compare can be any value in its range
+
+ // TRD : note the different argument order for the GCC intrinsic compared to the MSVC intrinsic
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ rv = (lfds611_atom_t) __sync_val_compare_and_swap( destination, compare, exchange );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( rv );
+ }
+
+#endif
+
--- /dev/null
+#include "lfds611_abstraction_internal_body.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN64 && defined _MSC_VER)
+
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN64 indicates 64 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS611_INLINE unsigned char lfds611_abstraction_dcas( volatile lfds611_atom_t *destination, lfds611_atom_t *exchange, lfds611_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ cas_result = _InterlockedCompareExchange128( (volatile __int64 *) destination, (__int64) *(exchange+1), (__int64) *exchange, (__int64 *) compare );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( cas_result ) ;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS611_INLINE unsigned char lfds611_abstraction_dcas( volatile lfds611_atom_t *destination, lfds611_atom_t *exchange, lfds611_atom_t *compare )
+ {
+ __int64
+ original_compare;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
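+ // TRD : emulate the contiguous double-word CAS with a single 64-bit CAS; success is determined by comparing the value returned from the destination with the original compare value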
+ *(__int64 *) &original_compare = *(__int64 *) compare;
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ *(__int64 *) compare = _InterlockedCompareExchange64( (volatile __int64 *) destination, *(__int64 *) exchange, *(__int64 *) compare );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( (unsigned char) (*(__int64 *) compare == *(__int64 *) &original_compare) );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __x86_64__ && defined __GNUC__)
+
+ /* TRD : any OS on x64 with GCC
+
+ __x86_64__ indicates x64
+ __GNUC__ indicates GCC
+ */
+
+ static LFDS611_INLINE unsigned char lfds611_abstraction_dcas( volatile lfds611_atom_t *destination, lfds611_atom_t *exchange, lfds611_atom_t *compare )
+ {
+ unsigned char
+ cas_result;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
+ // TRD : __asm__ with "memory" in the clobber list is for GCC a full compiler barrier
+ __asm__ __volatile__
+ (
+ "lock;" // make cmpxchg16b atomic
+ "cmpxchg16b %0;" // cmpxchg16b sets ZF on success
+ "setz %3;" // if ZF set, set cas_result to 1
+
+ // output
+ : "+m" (*(volatile lfds611_atom_t (*)[2]) destination), "+a" (*compare), "+d" (*(compare+1)), "=q" (cas_result)
+
+ // input
+ : "b" (*exchange), "c" (*(exchange+1))
+
+ // clobbered
+ : "cc", "memory"
+ );
+
+ return( cas_result );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if ((defined __i686__ || defined __arm__) && defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on x86 or ARM with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ static LFDS611_INLINE unsigned char lfds611_abstraction_dcas( volatile lfds611_atom_t *destination, lfds611_atom_t *exchange, lfds611_atom_t *compare )
+ {
+ unsigned char
+ cas_result = 0;
+
+ unsigned long long int
+ original_destination;
+
+ assert( destination != NULL );
+ assert( exchange != NULL );
+ assert( compare != NULL );
+
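+ // TRD : emulate the contiguous double-word CAS with the 64-bit __sync compare-and-swap; the operation succeeded if the value returned from the destination equals the original compare value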
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ original_destination = __sync_val_compare_and_swap( (volatile unsigned long long int *) destination, *(unsigned long long int *) compare, *(unsigned long long int *) exchange );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ if( original_destination == *(unsigned long long int *) compare )
+ cas_result = 1;
+
+ *(unsigned long long int *) compare = original_destination;
+
+ return( cas_result );
+ }
+
+#endif
+
+
--- /dev/null
+#include "lfds611_abstraction_internal_wrapper.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (!defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any OS except Windows kernel on any CPU with any compiler\r
+\r
+ !WIN_KERNEL_BUILD indicates not Windows kernel\r
+ */\r
+\r
+ void lfds611_abstraction_free( void *memory )\r
+ {\r
+ free( memory );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler\r
+\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ void lfds611_abstraction_free( void *memory )\r
+ {\r
+ ExFreePoolWithTag( memory, 'sdfl' );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "lfds611_abstraction_internal_body.h"
+
+
+
+
+
+/****************************************************************************/
+#if (defined _WIN64 && defined _MSC_VER)
+
+ /* TRD : 64 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ _WIN64 indicates 64 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_increment( volatile lfds611_atom_t *value )
+ {
+ lfds611_atom_t
+ rv;
+
+ assert( value != NULL );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ rv = (lfds611_atom_t) _InterlockedIncrement64( (__int64 *) value );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (!defined _WIN64 && defined _WIN32 && defined _MSC_VER)
+
+ /* TRD : 32 bit Windows (user-mode or kernel) on any CPU with the Microsoft C compiler
+
+ (!defined _WIN64 && defined _WIN32) indicates 32 bit Windows
+ _MSC_VER indicates Microsoft C compiler
+ */
+
+ static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_increment( volatile lfds611_atom_t *value )
+ {
+ lfds611_atom_t
+ rv;
+
+ assert( value != NULL );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ rv = (lfds611_atom_t) _InterlockedIncrement( (long int *) value );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if (defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)))
+
+ /* TRD : any OS on any CPU with GCC 4.1.0 or better
+
+ GCC 4.1.0 introduced the __sync_*() atomic intrinsics
+
+ __GNUC__ / __GNUC_MINOR__ / __GNUC_PATCHLEVEL__ indicates GCC and which version
+ */
+
+ static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_increment( volatile lfds611_atom_t *value )
+ {
+ lfds611_atom_t
+ rv;
+
+ assert( value != NULL );
+
+ // TRD : no need for casting here, GCC has a __sync_add_and_fetch() for all native types
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ rv = (lfds611_atom_t) __sync_add_and_fetch( value, 1 );
+
+ LFDS611_BARRIER_COMPILER_FULL;
+
+ return( rv );
+ }
+
+#endif
+
--- /dev/null
+/***** private prototypes *****/\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+/***** the internal header body *****/\r
+#include "lfds611_abstraction_internal_body.h"\r
+\r
--- /dev/null
+#include "lfds611_abstraction_internal_wrapper.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (!defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any OS except Windows kernel on any CPU with any compiler\r
+\r
+ !WIN_KERNEL_BUILD indicates not Windows kernel\r
+ */\r
+\r
+ void *lfds611_abstraction_malloc( size_t size )\r
+ {\r
+ return( malloc(size) );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (kernel) on any CPU with the Microsoft C compiler\r
+\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ void *lfds611_abstraction_malloc( size_t size )\r
+ {\r
+ return( ExAllocatePoolWithTag(NonPagedPool, size, 'sdfl') );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "lfds611_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_freelist_delete( struct lfds611_freelist_state *fs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ void\r
+ *user_data;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ // TRD : leading load barrier not required as it will be performed by the pop\r
+\r
+ while( lfds611_freelist_pop(fs, &fe) )\r
+ {\r
+ if( user_data_delete_function != NULL )\r
+ {\r
+ lfds611_freelist_get_user_data_from_element( fe, &user_data );\r
+ user_data_delete_function( user_data, user_state );\r
+ }\r
+\r
+ lfds611_liblfds_aligned_free( fe );\r
+ }\r
+\r
+ lfds611_liblfds_aligned_free( fs );\r
+\r
+ return;\r
+}\r
+\r
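+/* a minimal sketch, not part of the library, of a user_data_delete_function\r
+   with the signature lfds611_freelist_delete() expects; it assumes the user\r
+   data was allocated with malloc() and that user_state is unused;\r
+   "my_user_data_delete" and "fs" are hypothetical caller-side names\r
+\r
+     void my_user_data_delete( void *user_data, void *user_state )\r
+     {\r
+       free( user_data );\r
+     }\r
+\r
+     lfds611_freelist_delete( fs, my_user_data_delete, NULL );\r
+*/\r
+\r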
--- /dev/null
+#include "lfds611_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void *lfds611_freelist_get_user_data_from_element( struct lfds611_freelist_element *fe, void **user_data )\r
+{\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ if( user_data != NULL )\r
+ *user_data = fe->user_data;\r
+\r
+ return( fe->user_data );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_freelist_set_user_data_in_element( struct lfds611_freelist_element *fe, void *user_data )\r
+{\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ fe->user_data = user_data;\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return;\r
+}\r
+\r
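+/* a minimal sketch, not part of the library, of per-element user data use,\r
+   assuming "fe" is an element previously obtained from lfds611_freelist_pop()\r
+   and "my_payload" is a hypothetical caller-side pointer\r
+\r
+     void *user_data;\r
+\r
+     lfds611_freelist_set_user_data_in_element( fe, my_payload );\r
+     lfds611_freelist_get_user_data_from_element( fe, &user_data );\r
+\r
+     // user_data now equals my_payload\r
+*/\r
+\r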
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+/***** defines *****/\r
+#define LFDS611_FREELIST_POINTER 0\r
+#define LFDS611_FREELIST_COUNTER 1\r
+#define LFDS611_FREELIST_PAC_SIZE 2\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS611_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds611_freelist_state\r
+{\r
+ struct lfds611_freelist_element\r
+ *volatile top[LFDS611_FREELIST_PAC_SIZE];\r
+\r
+ int\r
+ (*user_data_init_function)( void **user_data, void *user_state );\r
+\r
+ void\r
+ *user_state;\r
+\r
+ lfds611_atom_t\r
+ aba_counter,\r
+ element_count;\r
+};\r
+\r
+struct lfds611_freelist_element\r
+{\r
+ struct lfds611_freelist_element\r
+ *next[LFDS611_FREELIST_PAC_SIZE];\r
+\r
+ void\r
+ *user_data;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** private prototypes *****/\r
+lfds611_atom_t lfds611_freelist_internal_new_element( struct lfds611_freelist_state *fs, struct lfds611_freelist_element **fe );\r
+void lfds611_freelist_internal_validate( struct lfds611_freelist_state *fs, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *lfds611_freelist_validity );\r
+\r
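+/* a summarising note, not from the original sources, on the pointer-and-counter\r
+   (PAC) arrays above : top[LFDS611_FREELIST_POINTER] and top[LFDS611_FREELIST_COUNTER]\r
+   are adjacent, double-pointer aligned words, so the pair can be exchanged as a\r
+   single unit by lfds611_abstraction_dcas(); the counter is bumped on every push,\r
+   which is what defeats the ABA problem, i.e. conceptually\r
+\r
+     [ pointer to top element | ever-rising counter ]   <- one DCAS target\r
+*/\r
+\r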
--- /dev/null
+#include "lfds611_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_freelist_new( struct lfds611_freelist_state **fs, lfds611_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ lfds611_atom_t\r
+ element_count;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+ // TRD : user_data_init_function can be NULL\r
+\r
+ *fs = (struct lfds611_freelist_state *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_freelist_state), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( (*fs) != NULL )\r
+ {\r
+ (*fs)->top[LFDS611_FREELIST_POINTER] = NULL;\r
+ (*fs)->top[LFDS611_FREELIST_COUNTER] = 0;\r
+ (*fs)->user_data_init_function = user_data_init_function;\r
+ (*fs)->user_state = user_state;\r
+ (*fs)->aba_counter = 0;\r
+ (*fs)->element_count = 0;\r
+\r
+ element_count = lfds611_freelist_new_elements( *fs, number_elements );\r
+\r
+ if( element_count == number_elements )\r
+ rv = 1;\r
+\r
+ if( element_count != number_elements )\r
+ {\r
+ lfds611_liblfds_aligned_free( (*fs) );\r
+ *fs = NULL;\r
+ }\r
+ }\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return( rv );\r
+}\r
+\r
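+/* a minimal usage sketch, not part of the library, for lfds611_freelist_new();\r
+   "my_init" and "my_element" are hypothetical caller-side names, and the sketch\r
+   assumes each element carries a malloc()'d payload\r
+\r
+     int my_init( void **user_data, void *user_state )\r
+     {\r
+       *user_data = malloc( sizeof(struct my_element) );\r
+       return( *user_data != NULL );\r
+     }\r
+\r
+     struct lfds611_freelist_state *fs;\r
+\r
+     if( lfds611_freelist_new(&fs, 1000, my_init, NULL) )\r
+     {\r
+       // the freelist now holds 1000 elements, each with its own payload\r
+     }\r
+*/\r
+\r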
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_freelist_use( struct lfds611_freelist_state *fs )\r
+{\r
+ assert( fs != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+lfds611_atom_t lfds611_freelist_new_elements( struct lfds611_freelist_state *fs, lfds611_atom_t number_elements )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ lfds611_atom_t\r
+ loop,\r
+ count = 0;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+ // TRD : user_data_init_function can be NULL\r
+\r
+ for( loop = 0 ; loop < number_elements ; loop++ )\r
+ if( lfds611_freelist_internal_new_element(fs, &fe) )\r
+ {\r
+ lfds611_freelist_push( fs, fe );\r
+ count++;\r
+ }\r
+\r
+ return( count );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+lfds611_atom_t lfds611_freelist_internal_new_element( struct lfds611_freelist_state *fs, struct lfds611_freelist_element **fe )\r
+{\r
+ lfds611_atom_t\r
+ rv = 0;\r
+\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ /* TRD : basically, does what you'd expect;\r
+\r
+ allocates an element\r
+ calls the user init function\r
+ if anything fails, cleans up,\r
+ sets *fe to NULL\r
+ and returns 0\r
+ */\r
+\r
+ *fe = (struct lfds611_freelist_element *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_freelist_element), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *fe != NULL )\r
+ {\r
+ if( fs->user_data_init_function == NULL )\r
+ {\r
+ (*fe)->user_data = NULL;\r
+ rv = 1;\r
+ }\r
+\r
+ if( fs->user_data_init_function != NULL )\r
+ {\r
+ rv = fs->user_data_init_function( &(*fe)->user_data, fs->user_state );\r
+\r
+ if( rv == 0 )\r
+ {\r
+ lfds611_liblfds_aligned_free( *fe );\r
+ *fe = NULL;\r
+ }\r
+ }\r
+ }\r
+\r
+ if( rv == 1 )\r
+ lfds611_abstraction_increment( (lfds611_atom_t *) &fs->element_count );\r
+\r
+ return( rv );\r
+}\r
+\r
--- /dev/null
+#include "lfds611_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_freelist_element *lfds611_freelist_pop( struct lfds611_freelist_state *fs, struct lfds611_freelist_element **fe )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_freelist_element\r
+ *fe_local[LFDS611_FREELIST_PAC_SIZE];\r
+\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ fe_local[LFDS611_FREELIST_COUNTER] = fs->top[LFDS611_FREELIST_COUNTER];\r
+ fe_local[LFDS611_FREELIST_POINTER] = fs->top[LFDS611_FREELIST_POINTER];\r
+\r
+ /* TRD : note that lfds611_abstraction_dcas loads the original value of the destination (fs->top) into the compare (fe_local)\r
+ (this happens of course after the CAS itself has occurred inside lfds611_abstraction_dcas)\r
+ */\r
+\r
+ do\r
+ {\r
+ if( fe_local[LFDS611_FREELIST_POINTER] == NULL )\r
+ {\r
+ *fe = NULL;\r
+ return( *fe );\r
+ }\r
+ }\r
+ while( 0 == lfds611_abstraction_dcas((volatile lfds611_atom_t *) fs->top, (lfds611_atom_t *) fe_local[LFDS611_FREELIST_POINTER]->next, (lfds611_atom_t *) fe_local) );\r
+\r
+ *fe = (struct lfds611_freelist_element *) fe_local[LFDS611_FREELIST_POINTER];\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_freelist_element *lfds611_freelist_guaranteed_pop( struct lfds611_freelist_state *fs, struct lfds611_freelist_element **fe )\r
+{\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds611_freelist_internal_new_element( fs, fe );\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_freelist_push( struct lfds611_freelist_state *fs, struct lfds611_freelist_element *fe )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_freelist_element\r
+ *fe_local[LFDS611_FREELIST_PAC_SIZE],\r
+ *original_fe_next[LFDS611_FREELIST_PAC_SIZE];\r
+\r
+ assert( fs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ fe_local[LFDS611_FREELIST_POINTER] = fe;\r
+ fe_local[LFDS611_FREELIST_COUNTER] = (struct lfds611_freelist_element *) lfds611_abstraction_increment( (lfds611_atom_t *) &fs->aba_counter );\r
+\r
+ original_fe_next[LFDS611_FREELIST_POINTER] = fs->top[LFDS611_FREELIST_POINTER];\r
+ original_fe_next[LFDS611_FREELIST_COUNTER] = fs->top[LFDS611_FREELIST_COUNTER];\r
+\r
+ /* TRD : note that lfds611_abstraction_dcas loads the original value of the destination (fs->top) into the compare (original_fe_next)\r
+ (this happens of course after the CAS itself has occurred inside lfds611_abstraction_dcas)\r
+ this then causes us in our loop, should we repeat it, to update fe_local->next to a more\r
+ up-to-date version of the head of the lfds611_freelist\r
+ */\r
+\r
+ do\r
+ {\r
+ fe_local[LFDS611_FREELIST_POINTER]->next[LFDS611_FREELIST_POINTER] = original_fe_next[LFDS611_FREELIST_POINTER];\r
+ fe_local[LFDS611_FREELIST_POINTER]->next[LFDS611_FREELIST_COUNTER] = original_fe_next[LFDS611_FREELIST_COUNTER];\r
+ }\r
+ while( 0 == lfds611_abstraction_dcas((volatile lfds611_atom_t *) fs->top, (lfds611_atom_t *) fe_local, (lfds611_atom_t *) original_fe_next) );\r
+\r
+ return;\r
+}\r
+\r
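+/* a minimal usage sketch, not part of the library, of the pop/push pair above,\r
+   assuming "fs" is a freelist created earlier with lfds611_freelist_new()\r
+\r
+     struct lfds611_freelist_element *fe;\r
+     void *user_data;\r
+\r
+     if( lfds611_freelist_pop(fs, &fe) != NULL )\r
+     {\r
+       lfds611_freelist_get_user_data_from_element( fe, &user_data );\r
+       // ... use user_data ...\r
+       lfds611_freelist_push( fs, fe );\r
+     }\r
+\r
+   pop returns NULL (and sets fe to NULL) when the freelist is empty\r
+*/\r
+\r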
--- /dev/null
+#include "lfds611_freelist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_freelist_query( struct lfds611_freelist_state *fs, enum lfds611_freelist_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( fs != NULL );\r
+ // TRD : query type can be any value in its range\r
+ // TRD : query_input can be NULL in some cases\r
+ assert( query_output != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS611_FREELIST_QUERY_ELEMENT_COUNT:\r
+ assert( query_input == NULL );\r
+\r
+ *(lfds611_atom_t *) query_output = fs->element_count;\r
+ break;\r
+\r
+ case LFDS611_FREELIST_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ lfds611_freelist_internal_validate( fs, (struct lfds611_validation_info *) query_input, (enum lfds611_data_structure_validity *) query_output );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_freelist_internal_validate( struct lfds611_freelist_state *fs, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *lfds611_freelist_validity )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe,\r
+ *fe_slow,\r
+ *fe_fast;\r
+\r
+ lfds611_atom_t\r
+ element_count = 0;\r
+\r
+ assert( fs != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( lfds611_freelist_validity != NULL );\r
+\r
+ *lfds611_freelist_validity = LFDS611_VALIDITY_VALID;\r
+\r
+ fe_slow = fe_fast = (struct lfds611_freelist_element *) fs->top[LFDS611_FREELIST_POINTER];\r
+\r
+ /* TRD : first, check for a loop\r
+ we have two pointers\r
+ both of which start at the top of the lfds611_freelist\r
+ we enter a loop\r
+ and on each iteration\r
+ we advance one pointer by one element\r
+ and the other by two\r
+\r
+ we exit the loop when both pointers are NULL\r
+ (have reached the end of the lfds611_freelist)\r
+\r
+ or\r
+\r
+ if the fast pointer 'sees' the slow pointer\r
+ which means we have a loop\r
+ */\r
+\r
+ if( fe_slow != NULL )\r
+ do\r
+ {\r
+ fe_slow = fe_slow->next[LFDS611_FREELIST_POINTER];\r
+\r
+ if( fe_fast != NULL )\r
+ fe_fast = fe_fast->next[LFDS611_FREELIST_POINTER];\r
+\r
+ if( fe_fast != NULL )\r
+ fe_fast = fe_fast->next[LFDS611_FREELIST_POINTER];\r
+ }\r
+ while( fe_slow != NULL and fe_fast != fe_slow );\r
+\r
+ if( fe_fast != NULL and fe_slow != NULL and fe_fast == fe_slow )\r
+ *lfds611_freelist_validity = LFDS611_VALIDITY_INVALID_LOOP;\r
+\r
+ /* TRD : now check for expected number of elements\r
+ vi can be NULL, in which case we do not check\r
+ we know we don't have a loop from our earlier check\r
+ */\r
+\r
+ if( *lfds611_freelist_validity == LFDS611_VALIDITY_VALID and vi != NULL )\r
+ {\r
+ fe = (struct lfds611_freelist_element *) fs->top[LFDS611_FREELIST_POINTER];\r
+\r
+ while( fe != NULL )\r
+ {\r
+ element_count++;\r
+ fe = (struct lfds611_freelist_element *) fe->next[LFDS611_FREELIST_POINTER];\r
+ }\r
+\r
+ if( element_count < vi->min_elements )\r
+ *lfds611_freelist_validity = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( element_count > vi->max_elements )\r
+ *lfds611_freelist_validity = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
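+/* a minimal usage sketch, not part of the library, of the query interface above,\r
+   assuming "fs" is an existing freelist which is momentarily quiescent (concurrent\r
+   operations make the counts unstable)\r
+\r
+     lfds611_atom_t element_count;\r
+     struct lfds611_validation_info vi;\r
+     enum lfds611_data_structure_validity validity;\r
+\r
+     lfds611_freelist_query( fs, LFDS611_FREELIST_QUERY_ELEMENT_COUNT, NULL, &element_count );\r
+\r
+     vi.min_elements = vi.max_elements = element_count;\r
+     lfds611_freelist_query( fs, LFDS611_FREELIST_QUERY_VALIDATE, &vi, &validity );\r
+*/\r
+\r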
--- /dev/null
+#include "lfds611_liblfds_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_liblfds_abstraction_test_helper_increment_non_atomic( lfds611_atom_t *shared_counter )\r
+{\r
+ /* TRD : lfds611_atom_t must be volatile or the compiler\r
+ optimizes it away into a single store\r
+ */\r
+\r
+ volatile lfds611_atom_t\r
+ count = 0;\r
+\r
+ assert( shared_counter != NULL );\r
+\r
+ while( count++ < 10000000 )\r
+ (*(lfds611_atom_t *) shared_counter)++;\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_liblfds_abstraction_test_helper_increment_atomic( volatile lfds611_atom_t *shared_counter )\r
+{\r
+ lfds611_atom_t\r
+ count = 0;\r
+\r
+ assert( shared_counter != NULL );\r
+\r
+ while( count++ < 10000000 )\r
+ lfds611_abstraction_increment( shared_counter );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_liblfds_abstraction_test_helper_cas( volatile lfds611_atom_t *shared_counter, lfds611_atom_t *local_counter )\r
+{\r
+ lfds611_atom_t\r
+ loop = 0,\r
+ original_destination;\r
+\r
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) lfds611_atom_t\r
+ exchange,\r
+ compare;\r
+\r
+ assert( shared_counter != NULL );\r
+ assert( local_counter != NULL );\r
+\r
+ while( loop++ < 1000000 )\r
+ {\r
+ do\r
+ {\r
+ compare = *shared_counter;\r
+ exchange = compare + 1;\r
+\r
+ original_destination = lfds611_abstraction_cas( shared_counter, exchange, compare );\r
+ }\r
+ while( original_destination != compare );\r
+\r
+ (*local_counter)++;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_liblfds_abstraction_test_helper_dcas( volatile lfds611_atom_t *shared_counter, lfds611_atom_t *local_counter )\r
+{\r
+ lfds611_atom_t\r
+ loop = 0;\r
+\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) lfds611_atom_t\r
+ exchange[2],\r
+ compare[2];\r
+\r
+ assert( shared_counter != NULL );\r
+ assert( local_counter != NULL );\r
+\r
+ while( loop++ < 1000000 )\r
+ {\r
+ compare[0] = *shared_counter;\r
+ compare[1] = *(shared_counter+1);\r
+\r
+ do\r
+ {\r
+ exchange[0] = compare[0] + 1;\r
+ exchange[1] = compare[1];\r
+ }\r
+ while( 0 == lfds611_abstraction_dcas(shared_counter, exchange, compare) );\r
+\r
+ (*local_counter)++;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_liblfds_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_liblfds_aligned_free( void *memory )\r
+{\r
+ assert( memory != NULL );\r
+\r
+ // TRD : the "void *" stored above memory points to the root of the allocation\r
+ lfds611_abstraction_free( *( (void **) memory - 1 ) );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_liblfds_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void *lfds611_liblfds_aligned_malloc( size_t size, size_t align_in_bytes )\r
+{\r
+ void\r
+ *original_memory,\r
+ *memory;\r
+\r
+ size_t\r
+ offset;\r
+\r
+ // TRD : size can be any value in its range\r
+ // TRD : align_in_bytes can be any value in its range\r
+\r
+ original_memory = memory = lfds611_abstraction_malloc( size + sizeof(void *) + align_in_bytes );\r
+\r
+ if( memory != NULL )\r
+ {\r
+ memory = (void **) memory + 1;\r
+ offset = align_in_bytes - (size_t) memory % align_in_bytes;\r
+ memory = (unsigned char *) memory + offset;\r
+ *( (void **) memory - 1 ) = original_memory;\r
+ }\r
+\r
+ return( memory );\r
+}\r
+\r
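+/* a worked example, not from the original sources, of the offset arithmetic\r
+   above : suppose align_in_bytes is 16, sizeof(void *) is 8 and\r
+   lfds611_abstraction_malloc() returns 0x1008\r
+\r
+     memory = 0x1008 + 8         = 0x1010\r
+     offset = 16 - (0x1010 % 16) = 16      (already aligned, so a full 16 is skipped)\r
+     memory = 0x1010 + 16        = 0x1020  (16 byte aligned)\r
+\r
+   the original pointer (0x1008) is stored in the void * immediately below 0x1020,\r
+   which is what lfds611_liblfds_aligned_free() reads back\r
+*/\r
+\r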
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+\r
--- /dev/null
+This is not a data structure, but rather a set of functions internal to the library.\r
+\r
--- /dev/null
+#include "lfds611_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_queue_delete( struct lfds611_queue_state *qs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ void\r
+ *user_data;\r
+\r
+ assert( qs != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ // TRD : leading load barrier not required as it will be performed by the dequeue\r
+\r
+ while( lfds611_queue_dequeue(qs, &user_data) )\r
+ if( user_data_delete_function != NULL )\r
+ user_data_delete_function( user_data, user_state );\r
+\r
+ /* TRD : fully dequeuing will leave us\r
+ with a single dummy element\r
+ which both qs->enqueue and qs->dequeue point at\r
+ we push this back onto the lfds611_freelist\r
+ before we delete the lfds611_freelist\r
+ */\r
+\r
+ lfds611_freelist_push( qs->fs, qs->enqueue[LFDS611_QUEUE_POINTER]->fe );\r
+\r
+ lfds611_freelist_delete( qs->fs, lfds611_queue_internal_freelist_delete_function, NULL );\r
+\r
+ lfds611_liblfds_aligned_free( qs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_queue_internal_freelist_delete_function( void *user_data, void *user_state )\r
+{\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ lfds611_liblfds_aligned_free( user_data );\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+/***** pragmas *****/\r
+\r
+/***** defines *****/\r
+#define LFDS611_QUEUE_STATE_UNKNOWN -1\r
+#define LFDS611_QUEUE_STATE_EMPTY 0\r
+#define LFDS611_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE 1\r
+#define LFDS611_QUEUE_STATE_ATTEMPT_DEQUEUE 2\r
+\r
+#define LFDS611_QUEUE_POINTER 0\r
+#define LFDS611_QUEUE_COUNTER 1\r
+#define LFDS611_QUEUE_PAC_SIZE 2\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS611_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds611_queue_state\r
+{\r
+ struct lfds611_queue_element\r
+ *volatile enqueue[LFDS611_QUEUE_PAC_SIZE],\r
+ *volatile dequeue[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ lfds611_atom_t\r
+ aba_counter;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+};\r
+\r
+struct lfds611_queue_element\r
+{\r
+ // TRD : next in a lfds611_queue requires volatile as it is target of CAS\r
+ struct lfds611_queue_element\r
+ *volatile next[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ void\r
+ *user_data;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** externs *****/\r
+\r
+/***** private prototypes *****/\r
+int lfds611_queue_internal_freelist_init_function( void **user_data, void *user_state );\r
+void lfds611_queue_internal_freelist_delete_function( void *user_data, void *user_state );\r
+\r
+void lfds611_queue_internal_new_element_from_freelist( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE], void *user_data );\r
+void lfds611_queue_internal_guaranteed_new_element_from_freelist( struct lfds611_queue_state *qs, struct lfds611_queue_element * qe[LFDS611_QUEUE_PAC_SIZE], void *user_data );\r
+void lfds611_queue_internal_init_element( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE], struct lfds611_freelist_element *fe, void *user_data );\r
+\r
+void lfds611_queue_internal_queue( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE] );\r
+\r
+void lfds611_queue_internal_validate( struct lfds611_queue_state *qs, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *lfds611_queue_validity, enum lfds611_data_structure_validity *lfds611_freelist_validity );\r
+\r
--- /dev/null
+#include "lfds611_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_queue_new( struct lfds611_queue_state **qs, lfds611_atom_t number_elements )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ struct lfds611_queue_element\r
+ *qe[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ assert( qs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+\r
+ *qs = (struct lfds611_queue_state *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_queue_state), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *qs != NULL )\r
+ {\r
+ // TRD : the size of the lfds611_freelist is the size of the lfds611_queue (+1 for the leading dummy element, which is hidden from the caller)\r
+ lfds611_freelist_new( &(*qs)->fs, number_elements+1, lfds611_queue_internal_freelist_init_function, NULL );\r
+\r
+ if( (*qs)->fs != NULL )\r
+ {\r
+ lfds611_queue_internal_new_element_from_freelist( *qs, qe, NULL );\r
+ (*qs)->enqueue[LFDS611_QUEUE_POINTER] = (*qs)->dequeue[LFDS611_QUEUE_POINTER] = qe[LFDS611_QUEUE_POINTER];\r
+ (*qs)->enqueue[LFDS611_QUEUE_COUNTER] = (*qs)->dequeue[LFDS611_QUEUE_COUNTER] = 0;\r
+ (*qs)->aba_counter = 0;\r
+ rv = 1;\r
+ }\r
+\r
+ if( (*qs)->fs == NULL )\r
+ {\r
+ lfds611_liblfds_aligned_free( *qs );\r
+ *qs = NULL;\r
+ }\r
+ }\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return( rv );\r
+}\r
+\r
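+/* a minimal usage sketch, not part of the library, for the queue; "my_data" is\r
+   a hypothetical caller-side pointer\r
+\r
+     struct lfds611_queue_state *qs;\r
+     void *dequeued;\r
+\r
+     if( lfds611_queue_new(&qs, 1000) )\r
+     {\r
+       lfds611_queue_enqueue( qs, my_data );\r
+\r
+       if( lfds611_queue_dequeue(qs, &dequeued) )\r
+       {\r
+         // dequeued now holds my_data\r
+       }\r
+\r
+       lfds611_queue_delete( qs, NULL, NULL );\r
+     }\r
+*/\r
+\r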
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_queue_use( struct lfds611_queue_state *qs )\r
+{\r
+ assert( qs != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+int lfds611_queue_internal_freelist_init_function( void **user_data, void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ *user_data = lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_queue_element), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *user_data != NULL )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_queue_internal_new_element_from_freelist( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ qe[LFDS611_QUEUE_POINTER] = NULL;\r
+\r
+ lfds611_freelist_pop( qs->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ lfds611_queue_internal_init_element( qs, qe, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_queue_internal_guaranteed_new_element_from_freelist( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ qe[LFDS611_QUEUE_POINTER] = NULL;\r
+\r
+ lfds611_freelist_guaranteed_pop( qs->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ lfds611_queue_internal_init_element( qs, qe, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_queue_internal_init_element( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE], struct lfds611_freelist_element *fe, void *user_data )\r
+{\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds611_freelist_get_user_data_from_element( fe, (void **) &qe[LFDS611_QUEUE_POINTER] );\r
+ qe[LFDS611_QUEUE_COUNTER] = (struct lfds611_queue_element *) lfds611_abstraction_increment( (lfds611_atom_t *) &qs->aba_counter );\r
+\r
+ qe[LFDS611_QUEUE_POINTER]->next[LFDS611_QUEUE_POINTER] = NULL;\r
+ qe[LFDS611_QUEUE_POINTER]->next[LFDS611_QUEUE_COUNTER] = (struct lfds611_queue_element *) lfds611_abstraction_increment( (lfds611_atom_t *) &qs->aba_counter );\r
+\r
+ qe[LFDS611_QUEUE_POINTER]->fe = fe;\r
+ qe[LFDS611_QUEUE_POINTER]->user_data = user_data;\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_queue_query( struct lfds611_queue_state *qs, enum lfds611_queue_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( qs != NULL );\r
+ // TRD : query_type can be any value in its range\r
+ // TRD : query_input can be NULL\r
+ assert( query_output != NULL );\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS611_QUEUE_QUERY_ELEMENT_COUNT:\r
+ assert( query_input == NULL );\r
+\r
+ lfds611_freelist_query( qs->fs, LFDS611_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );\r
+ break;\r
+\r
+ case LFDS611_QUEUE_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ lfds611_queue_internal_validate( qs, (struct lfds611_validation_info *) query_input, (enum lfds611_data_structure_validity *) query_output, ((enum lfds611_data_structure_validity *) query_output)+1 );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_queue_internal_validate( struct lfds611_queue_state *qs, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *lfds611_queue_validity, enum lfds611_data_structure_validity *lfds611_freelist_validity )\r
+{\r
+ struct lfds611_queue_element\r
+ *qe,\r
+ *qe_slow,\r
+ *qe_fast;\r
+\r
+ lfds611_atom_t\r
+ element_count = 0,\r
+ total_elements;\r
+\r
+ struct lfds611_validation_info\r
+ lfds611_freelist_vi;\r
+\r
+ assert( qs != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( lfds611_queue_validity != NULL );\r
+ assert( lfds611_freelist_validity != NULL );\r
+\r
+ *lfds611_queue_validity = LFDS611_VALIDITY_VALID;\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ qe_slow = qe_fast = (struct lfds611_queue_element *) qs->dequeue[LFDS611_QUEUE_POINTER];\r
+\r
+ /* TRD : first, check for a loop\r
+ we have two pointers\r
+ both of which start at the dequeue end of the lfds611_queue\r
+ we enter a loop\r
+ and on each iteration\r
+ we advance one pointer by one element\r
+ and the other by two\r
+\r
+ we exit the loop when both pointers are NULL\r
+ (have reached the end of the lfds611_queue)\r
+\r
+ or\r
+\r
+ if the fast pointer 'sees' the slow pointer\r
+ which means we have a loop\r
+ */\r
+\r
+ if( qe_slow != NULL )\r
+ do\r
+ {\r
+ qe_slow = qe_slow->next[LFDS611_QUEUE_POINTER];\r
+\r
+ if( qe_fast != NULL )\r
+ qe_fast = qe_fast->next[LFDS611_QUEUE_POINTER];\r
+\r
+ if( qe_fast != NULL )\r
+ qe_fast = qe_fast->next[LFDS611_QUEUE_POINTER];\r
+ }\r
+ while( qe_slow != NULL and qe_fast != qe_slow );\r
+\r
+ if( qe_fast != NULL and qe_slow != NULL and qe_fast == qe_slow )\r
+ *lfds611_queue_validity = LFDS611_VALIDITY_INVALID_LOOP;\r
+\r
+ /* TRD : now check for expected number of elements\r
+ vi can be NULL, in which case we do not check\r
+ we know we don't have a loop from our earlier check\r
+ */\r
+\r
+ if( *lfds611_queue_validity == LFDS611_VALIDITY_VALID and vi != NULL )\r
+ {\r
+ qe = (struct lfds611_queue_element *) qs->dequeue[LFDS611_QUEUE_POINTER];\r
+\r
+ while( qe != NULL )\r
+ {\r
+ element_count++;\r
+ qe = (struct lfds611_queue_element *) qe->next[LFDS611_QUEUE_POINTER];\r
+ }\r
+\r
+ /* TRD : remember there is a dummy element in the lfds611_queue */\r
+ element_count--;\r
+\r
+ if( element_count < vi->min_elements )\r
+ *lfds611_queue_validity = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( element_count > vi->max_elements )\r
+ *lfds611_queue_validity = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ /* TRD : now we validate the lfds611_freelist\r
+\r
+ we may be able to check for the expected number of\r
+ elements in the lfds611_freelist\r
+\r
+ if the caller has given us an expected min and max\r
+ number of elements in the lfds611_queue, then the total number\r
+ of elements in the lfds611_freelist, minus that min and max,\r
+ gives us the expected number of elements in the\r
+ lfds611_freelist\r
+ */\r
+\r
+ if( vi != NULL )\r
+ {\r
+ lfds611_freelist_query( qs->fs, LFDS611_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );\r
+\r
+ /* TRD : remember there is a dummy element in the lfds611_queue */\r
+ total_elements--;\r
+\r
+ lfds611_freelist_vi.min_elements = total_elements - vi->max_elements;\r
+ lfds611_freelist_vi.max_elements = total_elements - vi->min_elements;\r
+\r
+ lfds611_freelist_query( qs->fs, LFDS611_FREELIST_QUERY_VALIDATE, (void *) &lfds611_freelist_vi, (void *) lfds611_freelist_validity );\r
+ }\r
+\r
+ if( vi == NULL )\r
+ lfds611_freelist_query( qs->fs, LFDS611_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds611_freelist_validity );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_queue_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_queue_enqueue( struct lfds611_queue_state *qs, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_queue_element\r
+ *qe[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ assert( qs != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ lfds611_queue_internal_new_element_from_freelist( qs, qe, user_data );\r
+\r
+ if( qe[LFDS611_QUEUE_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds611_queue_internal_queue( qs, qe );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_queue_guaranteed_enqueue( struct lfds611_queue_state *qs, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_queue_element\r
+ *qe[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ assert( qs != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ lfds611_queue_internal_guaranteed_new_element_from_freelist( qs, qe, user_data );\r
+\r
+ if( qe[LFDS611_QUEUE_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds611_queue_internal_queue( qs, qe );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_queue_internal_queue( struct lfds611_queue_state *qs, struct lfds611_queue_element *qe[LFDS611_QUEUE_PAC_SIZE] )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_queue_element\r
+ *enqueue[LFDS611_QUEUE_PAC_SIZE],\r
+ *next[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ unsigned char\r
+ cas_result = 0;\r
+\r
+ assert( qs != NULL );\r
+ assert( qe != NULL );\r
+\r
+ // TRD : the DCAS operation issues a read and write barrier, so we don't need a read barrier in the do() loop\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ do\r
+ {\r
+ enqueue[LFDS611_QUEUE_POINTER] = qs->enqueue[LFDS611_QUEUE_POINTER];\r
+ enqueue[LFDS611_QUEUE_COUNTER] = qs->enqueue[LFDS611_QUEUE_COUNTER];\r
+\r
+ next[LFDS611_QUEUE_POINTER] = enqueue[LFDS611_QUEUE_POINTER]->next[LFDS611_QUEUE_POINTER];\r
+ next[LFDS611_QUEUE_COUNTER] = enqueue[LFDS611_QUEUE_POINTER]->next[LFDS611_QUEUE_COUNTER];\r
+\r
+ /* TRD : this if() ensures that the next we read, just above,\r
+ really is from qs->enqueue (which we copied into enqueue)\r
+ */\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ if( qs->enqueue[LFDS611_QUEUE_POINTER] == enqueue[LFDS611_QUEUE_POINTER] and qs->enqueue[LFDS611_QUEUE_COUNTER] == enqueue[LFDS611_QUEUE_COUNTER] )\r
+ {\r
+ if( next[LFDS611_QUEUE_POINTER] == NULL )\r
+ {\r
+ qe[LFDS611_QUEUE_COUNTER] = next[LFDS611_QUEUE_COUNTER] + 1;\r
+ cas_result = lfds611_abstraction_dcas( (volatile lfds611_atom_t *) enqueue[LFDS611_QUEUE_POINTER]->next, (lfds611_atom_t *) qe, (lfds611_atom_t *) next );\r
+ }\r
+ else\r
+ {\r
+ next[LFDS611_QUEUE_COUNTER] = enqueue[LFDS611_QUEUE_COUNTER] + 1;\r
+ lfds611_abstraction_dcas( (volatile lfds611_atom_t *) qs->enqueue, (lfds611_atom_t *) next, (lfds611_atom_t *) enqueue );\r
+ }\r
+ }\r
+ }\r
+ while( cas_result == 0 );\r
+\r
+ qe[LFDS611_QUEUE_COUNTER] = enqueue[LFDS611_QUEUE_COUNTER] + 1;\r
+ lfds611_abstraction_dcas( (volatile lfds611_atom_t *) qs->enqueue, (lfds611_atom_t *) qe, (lfds611_atom_t *) enqueue );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_queue_dequeue( struct lfds611_queue_state *qs, void **user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_queue_element\r
+ *enqueue[LFDS611_QUEUE_PAC_SIZE],\r
+ *dequeue[LFDS611_QUEUE_PAC_SIZE],\r
+ *next[LFDS611_QUEUE_PAC_SIZE];\r
+\r
+ unsigned char\r
+ cas_result = 0;\r
+\r
+ int\r
+ rv = 1,\r
+ state = LFDS611_QUEUE_STATE_UNKNOWN,\r
+ finished_flag = LOWERED;\r
+\r
+ assert( qs != NULL );\r
+ assert( user_data != NULL );\r
+\r
+ // TRD : the DCAS operation issues a read and write barrier, so we don't need a read barrier in the do() loop\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ do\r
+ {\r
+ dequeue[LFDS611_QUEUE_POINTER] = qs->dequeue[LFDS611_QUEUE_POINTER];\r
+ dequeue[LFDS611_QUEUE_COUNTER] = qs->dequeue[LFDS611_QUEUE_COUNTER];\r
+\r
+ enqueue[LFDS611_QUEUE_POINTER] = qs->enqueue[LFDS611_QUEUE_POINTER];\r
+ enqueue[LFDS611_QUEUE_COUNTER] = qs->enqueue[LFDS611_QUEUE_COUNTER];\r
+\r
+ next[LFDS611_QUEUE_POINTER] = dequeue[LFDS611_QUEUE_POINTER]->next[LFDS611_QUEUE_POINTER];\r
+ next[LFDS611_QUEUE_COUNTER] = dequeue[LFDS611_QUEUE_POINTER]->next[LFDS611_QUEUE_COUNTER];\r
+\r
+ /* TRD : confirm that dequeue didn't move between reading it\r
+ and reading its next pointer\r
+ */\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ if( dequeue[LFDS611_QUEUE_POINTER] == qs->dequeue[LFDS611_QUEUE_POINTER] and dequeue[LFDS611_QUEUE_COUNTER] == qs->dequeue[LFDS611_QUEUE_COUNTER] )\r
+ {\r
+ if( enqueue[LFDS611_QUEUE_POINTER] == dequeue[LFDS611_QUEUE_POINTER] and next[LFDS611_QUEUE_POINTER] == NULL )\r
+ state = LFDS611_QUEUE_STATE_EMPTY;\r
+\r
+ if( enqueue[LFDS611_QUEUE_POINTER] == dequeue[LFDS611_QUEUE_POINTER] and next[LFDS611_QUEUE_POINTER] != NULL )\r
+ state = LFDS611_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE;\r
+\r
+ if( enqueue[LFDS611_QUEUE_POINTER] != dequeue[LFDS611_QUEUE_POINTER] )\r
+ state = LFDS611_QUEUE_STATE_ATTEMPT_DEQUEUE;\r
+\r
+ switch( state )\r
+ {\r
+ case LFDS611_QUEUE_STATE_EMPTY:\r
+ *user_data = NULL;\r
+ rv = 0;\r
+ finished_flag = RAISED;\r
+ break;\r
+\r
+ case LFDS611_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE:\r
+ next[LFDS611_QUEUE_COUNTER] = enqueue[LFDS611_QUEUE_COUNTER] + 1;\r
+ lfds611_abstraction_dcas( (volatile lfds611_atom_t *) qs->enqueue, (lfds611_atom_t *) next, (lfds611_atom_t *) enqueue );\r
+ break;\r
+\r
+ case LFDS611_QUEUE_STATE_ATTEMPT_DEQUEUE:\r
+ *user_data = next[LFDS611_QUEUE_POINTER]->user_data;\r
+\r
+ next[LFDS611_QUEUE_COUNTER] = dequeue[LFDS611_QUEUE_COUNTER] + 1;\r
+ cas_result = lfds611_abstraction_dcas( (volatile lfds611_atom_t *) qs->dequeue, (lfds611_atom_t *) next, (lfds611_atom_t *) dequeue );\r
+\r
+ if( cas_result == 1 )\r
+ finished_flag = RAISED;\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ while( finished_flag == LOWERED );\r
+\r
+ if( cas_result == 1 )\r
+ lfds611_freelist_push( qs->fs, dequeue[LFDS611_QUEUE_POINTER]->fe );\r
+\r
+ return( rv );\r
+}\r
+\r
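+/* a minimal consumer-side sketch, not part of the library, of the dequeue\r
+   semantics above : dequeue returns 0 and sets user_data to NULL when the queue\r
+   is empty, so draining a queue looks like this ("qs" is assumed to be an\r
+   existing struct lfds611_queue_state *)\r
+\r
+     void *user_data;\r
+\r
+     while( lfds611_queue_dequeue(qs, &user_data) )\r
+     {\r
+       // process user_data\r
+     }\r
+*/\r
+\r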
--- /dev/null
+#include "lfds611_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_ringbuffer_delete( struct lfds611_ringbuffer_state *rs, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ assert( rs != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ lfds611_queue_delete( rs->qs, NULL, NULL );\r
+\r
+ lfds611_freelist_delete( rs->fs, user_data_delete_function, user_state );\r
+\r
+ lfds611_liblfds_aligned_free( rs );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_freelist_element *lfds611_ringbuffer_get_read_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element **fe )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds611_queue_dequeue( rs->qs, (void **) fe );\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_freelist_element *lfds611_ringbuffer_get_write_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element **fe, int *overwrite_flag )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+ // TRD : overwrite_flag can be NULL\r
+\r
+ /* TRD : we try to obtain an element from the lfds611_freelist\r
+ if we can, we populate it and add it to the lfds611_queue\r
+\r
+ if we cannot, then the lfds611_ringbuffer is full\r
+ so instead we grab the current read element and\r
+ use that instead\r
+\r
+ dequeue may fail since the lfds611_queue may be emptied\r
+ during our dequeue attempt\r
+\r
+ so what we actually do here is a loop, attempting\r
+ the lfds611_freelist and if it fails then a dequeue, until\r
+ we obtain an element\r
+\r
+ once we have an element, we enqueue it\r
+\r
+ you may be wondering why this operation is in a loop\r
+ remember - these operations are lock-free; anything\r
+ can happen in between\r
+\r
+ so for example the pop could fail because the lfds611_freelist\r
+ is empty; but by the time we go to get an element from\r
+ the lfds611_queue, the whole lfds611_queue has been emptied back into\r
+ the lfds611_freelist!\r
+\r
+ if overwrite_flag is provided, we set it to 0 if we\r
+ obtained a new element from the lfds611_freelist, 1 if we\r
+ stole an element from the lfds611_queue\r
+ */\r
+\r
+ do\r
+ {\r
+ if( overwrite_flag != NULL )\r
+ *overwrite_flag = 0;\r
+\r
+ lfds611_freelist_pop( rs->fs, fe );\r
+\r
+ if( *fe == NULL )\r
+ {\r
+ lfds611_ringbuffer_get_read_element( rs, fe );\r
+\r
+ if( overwrite_flag != NULL and *fe != NULL )\r
+ *overwrite_flag = 1;\r
+ }\r
+ }\r
+ while( *fe == NULL );\r
+\r
+ return( *fe );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_ringbuffer_put_read_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element *fe )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds611_freelist_push( rs->fs, fe );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_ringbuffer_put_write_element( struct lfds611_ringbuffer_state *rs, struct lfds611_freelist_element *fe )\r
+{\r
+ assert( rs != NULL );\r
+ assert( fe != NULL );\r
+\r
+ lfds611_queue_enqueue( rs->qs, fe );\r
+\r
+ return;\r
+}\r
+\r
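+/* a minimal usage sketch, not part of the library, of the read/write element\r
+   cycle above; "rs" is assumed to be an existing struct lfds611_ringbuffer_state *\r
+   and "my_payload" is a hypothetical caller-side pointer\r
+\r
+     struct lfds611_freelist_element *fe;\r
+     void *user_data;\r
+     int overwrite_flag;\r
+\r
+     // writer : obtain an element, fill its user data, publish it\r
+     lfds611_ringbuffer_get_write_element( rs, &fe, &overwrite_flag );\r
+     lfds611_freelist_set_user_data_in_element( fe, my_payload );\r
+     lfds611_ringbuffer_put_write_element( rs, fe );\r
+\r
+     // reader : take an element, consume its user data, return it\r
+     if( lfds611_ringbuffer_get_read_element(rs, &fe) != NULL )\r
+     {\r
+       lfds611_freelist_get_user_data_from_element( fe, &user_data );\r
+       lfds611_ringbuffer_put_read_element( rs, fe );\r
+     }\r
+*/\r
+\r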
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+/***** defines *****/\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS611_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds611_ringbuffer_state\r
+{\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** externs *****/\r
+\r
+/***** private prototypes *****/\r
+void lfds611_ringbuffer_internal_validate( struct lfds611_ringbuffer_state *rs, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *lfds611_queue_validity, enum lfds611_data_structure_validity *lfds611_freelist_validity );\r
+\r
--- /dev/null
+#include "lfds611_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_ringbuffer_new( struct lfds611_ringbuffer_state **rs, lfds611_atom_t number_elements, int (*user_data_init_function)(void **user_data, void *user_state), void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( rs != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+ // TRD : user_data_init_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ *rs = (struct lfds611_ringbuffer_state *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_ringbuffer_state), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *rs != NULL )\r
+ {\r
+ lfds611_freelist_new( &(*rs)->fs, number_elements, user_data_init_function, user_state );\r
+\r
+ if( (*rs)->fs != NULL )\r
+ {\r
+ lfds611_queue_new( &(*rs)->qs, number_elements );\r
+\r
+ if( (*rs)->qs != NULL )\r
+ rv = 1;\r
+\r
+ if( (*rs)->qs == NULL )\r
+ {\r
+ // the queue could not be created, so release the freelist before the state\r
+ lfds611_freelist_delete( (*rs)->fs, NULL, NULL );\r
+ lfds611_liblfds_aligned_free( *rs );\r
+ *rs = NULL;\r
+ }\r
+ }\r
+\r
+ else\r
+ {\r
+ lfds611_liblfds_aligned_free( *rs );\r
+ *rs = NULL;\r
+ }\r
+ }\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_ringbuffer_use( struct lfds611_ringbuffer_state *rs )\r
+{\r
+ assert( rs != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+#include "lfds611_ringbuffer_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_ringbuffer_query( struct lfds611_ringbuffer_state *rs, enum lfds611_ringbuffer_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( rs != NULL );\r
+ // TRD : query_type can be any value in its range\r
+ // TRD : query_input can be NULL\r
+ assert( query_output != NULL );\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS611_RINGBUFFER_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ lfds611_ringbuffer_internal_validate( rs, (struct lfds611_validation_info *) query_input, (enum lfds611_data_structure_validity *) query_output, ((enum lfds611_data_structure_validity *) query_output)+2 );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_ringbuffer_internal_validate( struct lfds611_ringbuffer_state *rs, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *lfds611_queue_validity, enum lfds611_data_structure_validity *lfds611_freelist_validity )\r
+{\r
+ assert( rs != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( lfds611_queue_validity != NULL );\r
+ assert( lfds611_freelist_validity != NULL );\r
+\r
+ lfds611_queue_query( rs->qs, LFDS611_QUEUE_QUERY_VALIDATE, vi, lfds611_queue_validity );\r
+\r
+ if( vi != NULL )\r
+ {\r
+ struct lfds611_validation_info\r
+ lfds611_freelist_vi;\r
+\r
+ lfds611_atom_t\r
+ total_elements;\r
+\r
+ lfds611_freelist_query( rs->fs, LFDS611_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );\r
+ lfds611_freelist_vi.min_elements = total_elements - vi->max_elements;\r
+ lfds611_freelist_vi.max_elements = total_elements - vi->min_elements;\r
+ lfds611_freelist_query( rs->fs, LFDS611_FREELIST_QUERY_VALIDATE, (void *) &lfds611_freelist_vi, (void *) lfds611_freelist_validity );\r
+ }\r
+\r
+ if( vi == NULL )\r
+ lfds611_freelist_query( rs->fs, LFDS611_FREELIST_QUERY_VALIDATE, NULL, (void *) lfds611_freelist_validity );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_slist_delete( struct lfds611_slist_state *ss )\r
+{\r
+ lfds611_slist_single_threaded_physically_delete_all_elements( ss );\r
+\r
+ lfds611_liblfds_aligned_free( ss );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_slist_logically_delete_element( struct lfds611_slist_state *ss, struct lfds611_slist_element *se )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) void\r
+ *volatile user_data_and_flags[2],\r
+ *volatile new_user_data_and_flags[2];\r
+\r
+ unsigned char\r
+ cas_rv = 0;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ user_data_and_flags[LFDS611_SLIST_USER_DATA] = se->user_data_and_flags[LFDS611_SLIST_USER_DATA];\r
+ user_data_and_flags[LFDS611_SLIST_FLAGS] = se->user_data_and_flags[LFDS611_SLIST_FLAGS];\r
+\r
+ do\r
+ {\r
+ new_user_data_and_flags[LFDS611_SLIST_USER_DATA] = user_data_and_flags[LFDS611_SLIST_USER_DATA];\r
+ new_user_data_and_flags[LFDS611_SLIST_FLAGS] = (void *) ((lfds611_atom_t) user_data_and_flags[LFDS611_SLIST_FLAGS] | LFDS611_SLIST_FLAG_DELETED);\r
+ }\r
+ while( !((lfds611_atom_t) user_data_and_flags[LFDS611_SLIST_FLAGS] & LFDS611_SLIST_FLAG_DELETED) and 0 == (cas_rv = lfds611_abstraction_dcas((volatile lfds611_atom_t *) se->user_data_and_flags, (lfds611_atom_t *) new_user_data_and_flags, (lfds611_atom_t *) user_data_and_flags)) );\r
+\r
+ if( cas_rv == 1 )\r
+ if( ss->user_data_delete_function != NULL )\r
+ ss->user_data_delete_function( (void *) user_data_and_flags[LFDS611_SLIST_USER_DATA], ss->user_state );\r
+\r
+ return( cas_rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_slist_single_threaded_physically_delete_all_elements( struct lfds611_slist_state *ss )\r
+{\r
+ struct lfds611_slist_element\r
+ *volatile se,\r
+ *volatile se_temp;\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ se = ss->head;\r
+\r
+ while( se != NULL )\r
+ {\r
+ // TRD : if a non-deleted element and there is a delete function, call the delete function\r
+ if( ss->user_data_delete_function != NULL )\r
+ ss->user_data_delete_function( (void *) se->user_data_and_flags[LFDS611_SLIST_USER_DATA], ss->user_state );\r
+\r
+ se_temp = se;\r
+ se = se->next;\r
+ lfds611_liblfds_aligned_free( (void *) se_temp );\r
+ }\r
+\r
+ lfds611_slist_internal_init_slist( ss, ss->user_data_delete_function, ss->user_state );\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return;\r
+}\r
+\r
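+/* a summarising note, not from the original sources, on the logical delete above :\r
+   each element carries a DCAS-able pair\r
+\r
+     user_data_and_flags[LFDS611_SLIST_USER_DATA]   the caller's pointer\r
+     user_data_and_flags[LFDS611_SLIST_FLAGS]       LFDS611_SLIST_NO_FLAGS or\r
+                                                    LFDS611_SLIST_FLAG_DELETED\r
+\r
+   lfds611_slist_logically_delete_element() ORs in the deleted flag with a single\r
+   DCAS, so a concurrent lfds611_slist_set_user_data_in_element() either completes\r
+   before the delete or is refused; elements are only physically freed later, by\r
+   lfds611_slist_single_threaded_physically_delete_all_elements()\r
+*/\r
+\r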
--- /dev/null
+#include "lfds611_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_slist_get_user_data_from_element( struct lfds611_slist_element *se, void **user_data )\r
+{\r
+ int\r
+ rv = 1;\r
+\r
+ assert( se != NULL );\r
+ assert( user_data != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ *user_data = (void *) se->user_data_and_flags[LFDS611_SLIST_USER_DATA];\r
+\r
+ if( (lfds611_atom_t) se->user_data_and_flags[LFDS611_SLIST_FLAGS] & LFDS611_SLIST_FLAG_DELETED )\r
+ rv = 0;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_slist_set_user_data_in_element( struct lfds611_slist_element *se, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) void\r
+ *user_data_and_flags[2],\r
+ *new_user_data_and_flags[2];\r
+\r
+ int\r
+ rv = 1;\r
+\r
+ assert( se != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ user_data_and_flags[LFDS611_SLIST_USER_DATA] = se->user_data_and_flags[LFDS611_SLIST_USER_DATA];\r
+ user_data_and_flags[LFDS611_SLIST_FLAGS] = se->user_data_and_flags[LFDS611_SLIST_FLAGS];\r
+\r
+ new_user_data_and_flags[LFDS611_SLIST_USER_DATA] = user_data;\r
+\r
+ do\r
+ {\r
+ new_user_data_and_flags[LFDS611_SLIST_FLAGS] = user_data_and_flags[LFDS611_SLIST_FLAGS];\r
+ }\r
+ while( !((lfds611_atom_t) user_data_and_flags[LFDS611_SLIST_FLAGS] & LFDS611_SLIST_FLAG_DELETED) and\r
+ 0 == lfds611_abstraction_dcas((volatile lfds611_atom_t *) se->user_data_and_flags, (lfds611_atom_t *) new_user_data_and_flags, (lfds611_atom_t *) user_data_and_flags) );\r
+\r
+ if( (lfds611_atom_t) user_data_and_flags[LFDS611_SLIST_FLAGS] & LFDS611_SLIST_FLAG_DELETED )\r
+ rv = 0;\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_slist_element *lfds611_slist_get_head( struct lfds611_slist_state *ss, struct lfds611_slist_element **se )\r
+{\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ *se = (struct lfds611_slist_element *) ss->head;\r
+\r
+ lfds611_slist_internal_move_to_first_undeleted_element( se );\r
+\r
+ return( *se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_slist_element *lfds611_slist_get_next( struct lfds611_slist_element *se, struct lfds611_slist_element **next_se )\r
+{\r
+ assert( se != NULL );\r
+ assert( next_se != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ *next_se = (struct lfds611_slist_element *) se->next;\r
+\r
+ lfds611_slist_internal_move_to_first_undeleted_element( next_se );\r
+\r
+ return( *next_se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_slist_element *lfds611_slist_get_head_and_then_next( struct lfds611_slist_state *ss, struct lfds611_slist_element **se )\r
+{\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ if( *se == NULL )\r
+ lfds611_slist_get_head( ss, se );\r
+ else\r
+ lfds611_slist_get_next( *se, se );\r
+\r
+ return( *se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_slist_internal_move_to_first_undeleted_element( struct lfds611_slist_element **se )\r
+{\r
+ assert( se != NULL );\r
+\r
+ while( *se != NULL and (lfds611_atom_t) (*se)->user_data_and_flags[LFDS611_SLIST_FLAGS] & LFDS611_SLIST_FLAG_DELETED )\r
+ (*se) = (struct lfds611_slist_element *) (*se)->next;\r
+\r
+ return;\r
+}\r
+\r
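+/* a minimal usage sketch, not part of the library, of building and walking a list\r
+   with the functions above; "ss" is assumed to be a list created earlier with\r
+   lfds611_slist_new() and "my_data" is a hypothetical payload pointer\r
+\r
+     struct lfds611_slist_element *se = NULL;\r
+     void *user_data;\r
+\r
+     lfds611_slist_new_head( ss, my_data );\r
+\r
+     while( lfds611_slist_get_head_and_then_next(ss, &se) != NULL )\r
+       if( lfds611_slist_get_user_data_from_element(se, &user_data) )\r
+       {\r
+         // user_data belongs to a live (not logically deleted) element\r
+       }\r
+*/\r
+\r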
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+/***** defines *****/\r
+#define LFDS611_SLIST_USER_DATA 0\r
+#define LFDS611_SLIST_FLAGS 1\r
+\r
+#define LFDS611_SLIST_NO_FLAGS 0x0\r
+#define LFDS611_SLIST_FLAG_DELETED 0x1\r
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS611_ALIGN_SINGLE_POINTER )\r
+\r
+struct lfds611_slist_state\r
+{\r
+ struct lfds611_slist_element\r
+ *volatile head;\r
+\r
+ void\r
+ (*user_data_delete_function)( void *user_data, void *user_state ),\r
+ *user_state;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+#pragma pack( push, LFDS611_ALIGN_DOUBLE_POINTER )\r
+\r
+/* TRD : this pragma pack doesn't seem to work under Windows\r
+ if the structure members are the correct way round\r
+ (next first), then user_data_and_flags ends up on\r
+ a single pointer boundary and DCAS crashes\r
+\r
+ accordingly, I've moved user_data_and_flags first\r
+*/\r
+\r
+struct lfds611_slist_element\r
+{\r
+ void\r
+ *volatile user_data_and_flags[2];\r
+\r
+ // TRD : requires volatile as is target of CAS\r
+ struct lfds611_slist_element\r
+ *volatile next;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** private prototypes *****/\r
+void lfds611_slist_internal_init_slist( struct lfds611_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state );\r
+\r
+void lfds611_slist_internal_link_element_to_head( struct lfds611_slist_state *lfds611_slist_state, struct lfds611_slist_element *volatile se );\r
+void lfds611_slist_internal_link_element_after_element( struct lfds611_slist_element *volatile lfds611_slist_in_list_element, struct lfds611_slist_element *volatile se );\r
+\r
+void lfds611_slist_internal_move_to_first_undeleted_element( struct lfds611_slist_element **se );\r
+\r
--- /dev/null
+#include "lfds611_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_slist_internal_link_element_to_head( struct lfds611_slist_state *ss, struct lfds611_slist_element *volatile se )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) struct lfds611_slist_element\r
+ *se_next;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ se_next = ss->head;\r
+\r
+ do\r
+ {\r
+ se->next = se_next;\r
+ }\r
+ while( se->next != (se_next = (struct lfds611_slist_element *) lfds611_abstraction_cas((volatile lfds611_atom_t *) &ss->head, (lfds611_atom_t) se, (lfds611_atom_t) se->next)) );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_slist_internal_link_element_after_element( struct lfds611_slist_element *volatile lfds611_slist_in_list_element, struct lfds611_slist_element *volatile se )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) struct lfds611_slist_element\r
+ *se_prev,\r
+ *se_next;\r
+\r
+ assert( lfds611_slist_in_list_element != NULL );\r
+ assert( se != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ se_prev = (struct lfds611_slist_element *) lfds611_slist_in_list_element;\r
+\r
+ se_next = se_prev->next;\r
+\r
+ do\r
+ {\r
+ se->next = se_next;\r
+ }\r
+ while( se->next != (se_next = (struct lfds611_slist_element *) lfds611_abstraction_cas((volatile lfds611_atom_t *) &se_prev->next, (lfds611_atom_t) se, (lfds611_atom_t) se->next)) );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_slist_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_slist_new( struct lfds611_slist_state **ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ *ss = (struct lfds611_slist_state *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_slist_state), LFDS611_ALIGN_SINGLE_POINTER );\r
+\r
+ if( *ss != NULL )\r
+ {\r
+ lfds611_slist_internal_init_slist( *ss, user_data_delete_function, user_state );\r
+ rv = 1;\r
+ }\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_slist_use( struct lfds611_slist_state *ss )\r
+{\r
+ assert( ss != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_slist_internal_init_slist( struct lfds611_slist_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ assert( ss != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ ss->head = NULL;\r
+ ss->user_data_delete_function = user_data_delete_function;\r
+ ss->user_state = user_state;\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_slist_element *lfds611_slist_new_head( struct lfds611_slist_state *ss, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) struct lfds611_slist_element\r
+ *volatile se;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ se = (struct lfds611_slist_element *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_slist_element), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( se != NULL )\r
+ {\r
+ se->user_data_and_flags[LFDS611_SLIST_USER_DATA] = user_data;\r
+ se->user_data_and_flags[LFDS611_SLIST_FLAGS] = LFDS611_SLIST_NO_FLAGS;\r
+\r
+ lfds611_slist_internal_link_element_to_head( ss, se );\r
+ }\r
+\r
+ return( (struct lfds611_slist_element *) se );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+struct lfds611_slist_element *lfds611_slist_new_next( struct lfds611_slist_element *se, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) struct lfds611_slist_element\r
+ *volatile se_next;\r
+\r
+ assert( se != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ se_next = (struct lfds611_slist_element *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_slist_element), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( se_next != NULL )\r
+ {\r
+ se_next->user_data_and_flags[LFDS611_SLIST_USER_DATA] = user_data;\r
+ se_next->user_data_and_flags[LFDS611_SLIST_FLAGS] = LFDS611_SLIST_NO_FLAGS;\r
+\r
+ lfds611_slist_internal_link_element_after_element( se, se_next );\r
+ }\r
+\r
+ return( (struct lfds611_slist_element *) se_next );\r
+}\r
+\r
--- /dev/null
+#include "lfds611_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_delete( struct lfds611_stack_state *ss, void (*user_data_delete_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ void\r
+ *user_data;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data_delete_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ while( lfds611_stack_pop(ss, &user_data) )\r
+ if( user_data_delete_function != NULL )\r
+ user_data_delete_function( user_data, user_state );\r
+\r
+ lfds611_freelist_delete( ss->fs, lfds611_stack_internal_freelist_delete_function, NULL );\r
+\r
+ lfds611_liblfds_aligned_free( ss );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_clear( struct lfds611_stack_state *ss, void (*user_data_clear_function)(void *user_data, void *user_state), void *user_state )\r
+{\r
+ void\r
+ *user_data;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data_clear_function can be NULL\r
+ // TRD : user_state can be NULL\r
+\r
+ while( lfds611_stack_pop(ss, &user_data) )\r
+ if( user_data_clear_function != NULL )\r
+ user_data_clear_function( user_data, user_state );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_stack_internal_freelist_delete_function( void *user_data, void *user_state )\r
+{\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ lfds611_liblfds_aligned_free( user_data );\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
--- /dev/null
+/***** the library wide include file *****/\r
+#include "liblfds611_internal.h"\r
+\r
+/***** pragmas *****/\r
+\r
+/***** defines *****/\r
+#define LFDS611_STACK_POINTER 0\r
+#define LFDS611_STACK_COUNTER 1\r
+#define LFDS611_STACK_PAC_SIZE 2\r
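+\r
+/* TRD : a descriptive note - the stack top and each element's next link are\r
+         stored as a pointer-and-counter (PAC) pair; the counter is taken from\r
+         aba_counter and is exchanged along with the pointer by the DCAS in\r
+         push and pop, which is what protects against ABA\r
+*/\r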
+\r
+/***** structures *****/\r
+#pragma pack( push, LFDS611_ALIGN_DOUBLE_POINTER )\r
+\r
+struct lfds611_stack_state\r
+{\r
+ // TRD : must come first for alignment\r
+ struct lfds611_stack_element\r
+ *volatile top[LFDS611_STACK_PAC_SIZE];\r
+\r
+ lfds611_atom_t\r
+ aba_counter;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+};\r
+\r
+struct lfds611_stack_element\r
+{\r
+ struct lfds611_stack_element\r
+ *next[LFDS611_STACK_PAC_SIZE];\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ void\r
+ *user_data;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
+/***** private prototypes *****/\r
+int lfds611_stack_internal_freelist_init_function( void **user_data, void *user_state );\r
+void lfds611_stack_internal_freelist_delete_function( void *user_data, void *user_state );\r
+\r
+void lfds611_stack_internal_new_element_from_freelist( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE], void *user_data );\r
+void lfds611_stack_internal_new_element( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE], void *user_data );\r
+void lfds611_stack_internal_init_element( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE], struct lfds611_freelist_element *fe, void *user_data );\r
+\r
+void lfds611_stack_internal_push( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE] );\r
+\r
+void lfds611_stack_internal_validate( struct lfds611_stack_state *ss, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *stack_validity, enum lfds611_data_structure_validity *freelist_validity );\r
+\r
--- /dev/null
+#include "lfds611_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_stack_new( struct lfds611_stack_state **ss, lfds611_atom_t number_elements )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : number_elements can be any value in its range\r
+\r
+ *ss = (struct lfds611_stack_state *) lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_stack_state), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *ss != NULL )\r
+ {\r
+ // TRD : the size of the lfds611_freelist is the size of the lfds611_stack\r
+ lfds611_freelist_new( &(*ss)->fs, number_elements, lfds611_stack_internal_freelist_init_function, NULL );\r
+\r
+ if( (*ss)->fs != NULL )\r
+ {\r
+ (*ss)->top[LFDS611_STACK_POINTER] = NULL;\r
+ (*ss)->top[LFDS611_STACK_COUNTER] = 0;\r
+ (*ss)->aba_counter = 0;\r
+ rv = 1;\r
+ }\r
+\r
+ // TRD : freelist creation failed - undo the stack state allocation\r
+ if( (*ss)->fs == NULL )\r
+ {\r
+ lfds611_liblfds_aligned_free( *ss );\r
+ *ss = NULL;\r
+ }\r
+ }\r
+\r
+ LFDS611_BARRIER_STORE;\r
+\r
+ return( rv );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void lfds611_stack_use( struct lfds611_stack_state *ss )\r
+{\r
+ assert( ss != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+int lfds611_stack_internal_freelist_init_function( void **user_data, void *user_state )\r
+{\r
+ int\r
+ rv = 0;\r
+\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ *user_data = lfds611_liblfds_aligned_malloc( sizeof(struct lfds611_stack_element), LFDS611_ALIGN_DOUBLE_POINTER );\r
+\r
+ if( *user_data != NULL )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_internal_new_element_from_freelist( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds611_freelist_pop( ss->fs, &fe );\r
+\r
+ if( fe == NULL )\r
+ se[LFDS611_STACK_POINTER] = NULL;\r
+\r
+ if( fe != NULL )\r
+ lfds611_stack_internal_init_element( ss, se, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_internal_new_element( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE], void *user_data )\r
+{\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds611_freelist_guaranteed_pop( ss->fs, &fe );\r
+\r
+ if( fe == NULL )\r
+ se[LFDS611_STACK_POINTER] = NULL;\r
+\r
+ if( fe != NULL )\r
+ lfds611_stack_internal_init_element( ss, se, fe, user_data );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_internal_init_element( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE], struct lfds611_freelist_element *fe, void *user_data )\r
+{\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+ assert( fe != NULL );\r
+ // TRD : user_data can be any value in its range\r
+\r
+ lfds611_freelist_get_user_data_from_element( fe, (void **) &se[LFDS611_STACK_POINTER] );\r
+\r
+ se[LFDS611_STACK_COUNTER] = (struct lfds611_stack_element *) lfds611_abstraction_increment( (lfds611_atom_t *) &ss->aba_counter );\r
+\r
+ se[LFDS611_STACK_POINTER]->next[LFDS611_STACK_POINTER] = NULL;\r
+ se[LFDS611_STACK_POINTER]->next[LFDS611_STACK_COUNTER] = 0;\r
+ se[LFDS611_STACK_POINTER]->fe = fe;\r
+ se[LFDS611_STACK_POINTER]->user_data = user_data;\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "lfds611_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_stack_push( struct lfds611_stack_state *ss, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_stack_element\r
+ *se[LFDS611_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ lfds611_stack_internal_new_element_from_freelist( ss, se, user_data );\r
+\r
+ if( se[LFDS611_STACK_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds611_stack_internal_push( ss, se );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_stack_guaranteed_push( struct lfds611_stack_state *ss, void *user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_stack_element\r
+ *se[LFDS611_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ // TRD : user_data can be NULL\r
+\r
+ /* TRD : this function allocates a new lfds611_freelist element and uses that\r
+ to push onto the lfds611_stack, guaranteeing success (unless malloc()\r
+ fails of course)\r
+ */\r
+\r
+ lfds611_stack_internal_new_element( ss, se, user_data );\r
+\r
+ // TRD : malloc failed\r
+ if( se[LFDS611_STACK_POINTER] == NULL )\r
+ return( 0 );\r
+\r
+ lfds611_stack_internal_push( ss, se );\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_internal_push( struct lfds611_stack_state *ss, struct lfds611_stack_element *se[LFDS611_STACK_PAC_SIZE] )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_stack_element\r
+ *original_se_next[LFDS611_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ assert( se != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ original_se_next[LFDS611_STACK_POINTER] = ss->top[LFDS611_STACK_POINTER];\r
+ original_se_next[LFDS611_STACK_COUNTER] = ss->top[LFDS611_STACK_COUNTER];\r
+\r
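+ /* TRD : a descriptive note - the DCAS compares both pointer and counter\r
+          against original_se_next; on failure it is expected to update\r
+          original_se_next with the currently observed top, so the loop\r
+          re-links se against that value and retries\r
+ */\r
+\r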
+ do\r
+ {\r
+ se[LFDS611_STACK_POINTER]->next[LFDS611_STACK_POINTER] = original_se_next[LFDS611_STACK_POINTER];\r
+ se[LFDS611_STACK_POINTER]->next[LFDS611_STACK_COUNTER] = original_se_next[LFDS611_STACK_COUNTER];\r
+ }\r
+ while( 0 == lfds611_abstraction_dcas((volatile lfds611_atom_t *) ss->top, (lfds611_atom_t *) se, (lfds611_atom_t *) original_se_next) );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int lfds611_stack_pop( struct lfds611_stack_state *ss, void **user_data )\r
+{\r
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) struct lfds611_stack_element\r
+ *se[LFDS611_STACK_PAC_SIZE];\r
+\r
+ assert( ss != NULL );\r
+ assert( user_data != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ se[LFDS611_STACK_COUNTER] = ss->top[LFDS611_STACK_COUNTER];\r
+ se[LFDS611_STACK_POINTER] = ss->top[LFDS611_STACK_POINTER];\r
+\r
+ do\r
+ {\r
+ if( se[LFDS611_STACK_POINTER] == NULL )\r
+ return( 0 );\r
+ }\r
+ while( 0 == lfds611_abstraction_dcas((volatile lfds611_atom_t *) ss->top, (lfds611_atom_t *) se[LFDS611_STACK_POINTER]->next, (lfds611_atom_t *) se) );\r
+\r
+ *user_data = se[LFDS611_STACK_POINTER]->user_data;\r
+\r
+ lfds611_freelist_push( ss->fs, se[LFDS611_STACK_POINTER]->fe );\r
+\r
+ return( 1 );\r
+}\r
+\r
--- /dev/null
+#include "lfds611_stack_internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_query( struct lfds611_stack_state *ss, enum lfds611_stack_query_type query_type, void *query_input, void *query_output )\r
+{\r
+ assert( ss != NULL );\r
+ // TRD : query_type can be any value in its range\r
+ // TRD : query_input can be NULL\r
+ assert( query_output != NULL );\r
+\r
+ LFDS611_BARRIER_LOAD;\r
+\r
+ switch( query_type )\r
+ {\r
+ case LFDS611_STACK_QUERY_ELEMENT_COUNT:\r
+ assert( query_input == NULL );\r
+\r
+ lfds611_freelist_query( ss->fs, LFDS611_FREELIST_QUERY_ELEMENT_COUNT, NULL, query_output );\r
+ break;\r
+\r
+ case LFDS611_STACK_QUERY_VALIDATE:\r
+ // TRD : query_input can be NULL\r
+\r
+ /* TRD : the validation info passed in is for the stack\r
+ it indicates the minimum and maximum number of elements\r
+ which should be present\r
+\r
+ we need to validate the freelist\r
+ and validate the stack\r
+\r
+ we cannot know the min/max for the freelist, given only\r
+ the min/max for the stack\r
+ */\r
+\r
+ lfds611_freelist_query( ss->fs, LFDS611_FREELIST_QUERY_VALIDATE, NULL, (enum lfds611_data_structure_validity *) query_output );\r
+\r
+ if( *(enum lfds611_data_structure_validity *) query_output == LFDS611_VALIDITY_VALID )\r
+ lfds611_stack_internal_validate( ss, (struct lfds611_validation_info *) query_input, (enum lfds611_data_structure_validity *) query_output, ((enum lfds611_data_structure_validity *) query_output)+1 );\r
+ break;\r
+ }\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void lfds611_stack_internal_validate( struct lfds611_stack_state *ss, struct lfds611_validation_info *vi, enum lfds611_data_structure_validity *stack_validity, enum lfds611_data_structure_validity *freelist_validity )\r
+{\r
+ struct lfds611_stack_element\r
+ *se,\r
+ *se_slow,\r
+ *se_fast;\r
+\r
+ lfds611_atom_t\r
+ element_count = 0,\r
+ total_elements;\r
+\r
+ struct lfds611_validation_info\r
+ freelist_vi;\r
+\r
+ assert( ss != NULL );\r
+ // TRD : vi can be NULL\r
+ assert( stack_validity != NULL );\r
+\r
+ *stack_validity = LFDS611_VALIDITY_VALID;\r
+\r
+ se_slow = se_fast = (struct lfds611_stack_element *) ss->top[LFDS611_STACK_POINTER];\r
+\r
+ /* TRD : first, check for a loop\r
+ we have two pointers\r
+ both of which start at the top of the stack\r
+ we enter a loop\r
+ and on each iteration\r
+ we advance one pointer by one element\r
+ and the other by two\r
+\r
+ we exit the loop when both pointers are NULL\r
+ (have reached the end of the stack)\r
+\r
+ or\r
+\r
+ if the fast pointer 'sees' the slow pointer\r
+ which means we have a loop\r
+ */\r
+\r
+ if( se_slow != NULL )\r
+ do\r
+ {\r
+ se_slow = se_slow->next[LFDS611_STACK_POINTER];\r
+\r
+ if( se_fast != NULL )\r
+ se_fast = se_fast->next[LFDS611_STACK_POINTER];\r
+\r
+ if( se_fast != NULL )\r
+ se_fast = se_fast->next[LFDS611_STACK_POINTER];\r
+ }\r
+ while( se_slow != NULL and se_fast != se_slow );\r
+\r
+ if( se_fast != NULL and se_slow != NULL and se_fast == se_slow )\r
+ *stack_validity = LFDS611_VALIDITY_INVALID_LOOP;\r
+\r
+ /* TRD : now check for expected number of elements\r
+ vi can be NULL, in which case we do not check\r
+ we know we don't have a loop from our earlier check\r
+ */\r
+\r
+ if( *stack_validity == LFDS611_VALIDITY_VALID and vi != NULL )\r
+ {\r
+ se = (struct lfds611_stack_element *) ss->top[LFDS611_STACK_POINTER];\r
+\r
+ while( se != NULL )\r
+ {\r
+ element_count++;\r
+ se = (struct lfds611_stack_element *) se->next[LFDS611_STACK_POINTER];\r
+ }\r
+\r
+ if( element_count < vi->min_elements )\r
+ *stack_validity = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( element_count > vi->max_elements )\r
+ *stack_validity = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ /* TRD : now we validate the freelist\r
+\r
+ we may be able to check for the expected number of\r
+ elements in the freelist\r
+\r
+ if the caller has given us an expected min and max\r
+ number of elements in the stack, then the total number\r
+ of elements in the freelist, minus the stack's max and\r
+ min respectively, gives us the expected min and max\r
+ number of elements in the freelist\r
+ */\r
+\r
+ if( vi != NULL )\r
+ {\r
+ lfds611_freelist_query( ss->fs, LFDS611_FREELIST_QUERY_ELEMENT_COUNT, NULL, (void *) &total_elements );\r
+\r
+ freelist_vi.min_elements = total_elements - vi->max_elements;\r
+ freelist_vi.max_elements = total_elements - vi->min_elements;\r
+\r
+ lfds611_freelist_query( ss->fs, LFDS611_FREELIST_QUERY_VALIDATE, (void *) &freelist_vi, (void *) freelist_validity );\r
+ }\r
+\r
+ if( vi == NULL )\r
+ lfds611_freelist_query( ss->fs, LFDS611_FREELIST_QUERY_VALIDATE, NULL, (void *) freelist_validity );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+/***** public prototypes *****/\r
+#include "liblfds611.h"\r
+\r
+/***** defines *****/\r
+#define and &&\r
+#define or ||\r
+\r
+#define RAISED 1\r
+#define LOWERED 0\r
+\r
+#define NO_FLAGS 0x0\r
+\r
+/***** private prototypes *****/\r
+void *lfds611_liblfds_aligned_malloc( size_t size, size_t align_in_bytes );\r
+void lfds611_liblfds_aligned_free( void *memory );\r
+\r
+static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_cas( volatile lfds611_atom_t *destination, lfds611_atom_t exchange, lfds611_atom_t compare );\r
+static LFDS611_INLINE unsigned char lfds611_abstraction_dcas( volatile lfds611_atom_t *destination, lfds611_atom_t *exchange, lfds611_atom_t *compare );\r
+static LFDS611_INLINE lfds611_atom_t lfds611_abstraction_increment( volatile lfds611_atom_t *value );\r
+\r
+/***** inlined code *****/\r
+#include "lfds611_abstraction/lfds611_abstraction_cas.c"\r
+#include "lfds611_abstraction/lfds611_abstraction_dcas.c"\r
+#include "lfds611_abstraction/lfds611_abstraction_increment.c"\r
+\r
--- /dev/null
+building test\r
+=============\r
+\r
+Windows (user-mode)\r
+===================\r
+1. Use Microsoft Visual Studio 2008 or Visual C++ 2008 Express Edition (or\r
+ later versions) to load "liblfds.sln". The "Win32" platform is x86,\r
+ the "x64" platform is x64. The test programme provides the "Release"\r
+ and "Debug" targets. The other targets ("Release DLL", "Release Lib",\r
+ "Debug DLL" and "Debug Lib") are carried over will-nilly from the liblds\r
+ library.\r
+\r
+ All builds will work, but DLL builds will require the DLL from liblfds\r
+ to be placed into a location where the test executable can find it (e.g.\r
+ the same directory).\r
+\r
+2. Use Microsoft Windows SDK and GNUmake to run makefile.windows (obviously\r
+ you'll need to have run setenv.bat or the appropriate vcvars*.bat first;\r
+ you can build for x64/64-bit and x86/32-bit - just run the correct batch\r
+ file).\r
+\r
+ If liblfds has been built as a DLL, the DLL from liblfds needs to be\r
+ placed into a location where the test executable can find it (e.g. the\r
+ same directory).\r
+\r
+ Targets are "rel", "dbg" and "clean". You need to clean between switching\r
+ targets.\r
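+\r
+ For example, assuming GNUmake is on the path as "make", switching from a\r
+ release build to a debug build might look like;\r
+\r
+ make -f makefile.windows rel\r
+ make -f makefile.windows clean\r
+ make -f makefile.windows dbg\r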
+\r
+Windows (kernel)\r
+================\r
+No build supported, since this is a command line utility.\r
+\r
+Linux\r
+=====\r
+Use GNUmake to run "makefile.linux". Targets are "rel", "dbg" and\r
+"clean". You need to clean between switching targets.\r
+\r
+If liblfds has been built as a shared object, the shared object file from\r
+liblfds will need to be placed somewhere the test executable can find it.\r
+\r
+A convenient solution is to place the shared object file in the same\r
+directory as the test executable and set the environment variable\r
+"LD_LIBRARY_PATH" to ".", e.g. in bash;\r
+\r
+export LD_LIBRARY_PATH=.\r
+\r
+Remember to unset after finishing testing, or your system will continue\r
+to scan the current directory for shared object files.\r
+\r
+unset LD_LIBRARY_PATH\r
+\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = ../liblfds611/inc\r
+LIBDIR = ../liblfds611/bin\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>nul 2>nul\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = test\r
+BINARY = $(BINDIR)/$(BINNAME)\r
+SRCDIRS = .\r
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))\r
+SYSLIBS = -lpthread -lc -lm\r
+USRLIBS = -llfds611\r
+\r
+##### CPU variants #####\r
+UNAME = $(shell uname -m)\r
+GCCARCH = -march=$(UNAME)\r
+\r
+ifeq ($(UNAME),x86_64)\r
+ GCCARCH = -march=core2\r
+endif\r
+\r
+ifeq ($(findstring arm,$(UNAME)),arm)\r
+ GCCARCH = -march=armv6k -marm\r
+endif\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+DG = gcc\r
+DGFLAGS = -MM -std=c99 -I"$(SRCDIR)" -I"$(INCDIR)" \r
+\r
+CC = gcc\r
+CFBASE = -Wall -Wno-unknown-pragmas -std=c99 $(GCCARCH) -pthread -c -I"$(SRCDIR)" -I"$(INCDIR)"\r
+CFREL = -O2 -Wno-strict-aliasing\r
+CFDBG = -O0 -g\r
+\r
+LD = gcc\r
+LFBASE = -L"$(LIBDIR)"\r
+LFREL = -O2 -s\r
+LFDBG = -O0 -g\r
+\r
+##### variants #####\r
+CFLAGS = $(CFBASE) $(CFDBG)\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+\r
+ifeq ($(MAKECMDGOALS),rel)\r
+ CFLAGS = $(CFBASE) $(CFREL)\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.o : %.c\r
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d\r
+ $(CC) $(CFLAGS) -o $@ $<\r
+\r
+##### explicit rules #####\r
+$(BINARY) : $(OBJECTS)\r
+ $(LD) -o $(BINARY) $(LFLAGS) $(OBJECTS) $(USRLIBS) $(SYSLIBS)\r
+ chmod +x $(BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean rel dbg\r
+\r
+clean : \r
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d\r
+\r
+rel : $(BINARY)\r
+dbg : $(BINARY)\r
+\r
+##### dependencies #####\r
+-include $(OBJECTS:.o=.d)\r
+\r
--- /dev/null
+##### paths #####\r
+BINDIR = bin\r
+INCDIR = ../liblfds611/inc\r
+LIBDIR = ../liblfds611/bin\r
+OBJDIR = obj\r
+SRCDIR = src\r
+\r
+##### misc #####\r
+QUIETLY = 1>nul 2>nul\r
+\r
+##### sources, objects and libraries #####\r
+BINNAME = test\r
+BINARY = $(BINDIR)\$(BINNAME).exe\r
+SRCDIRS = .\r
+SOURCES = abstraction_cpu_count.c test_abstraction.c abstraction_thread_start.c abstraction_thread_wait.c benchmark_freelist.c benchmark_queue.c benchmark_ringbuffer.c benchmark_stack.c test_freelist.c main.c misc.c test_queue.c test_ringbuffer.c test_slist.c test_stack.c\r
+OBJECTS = $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))\r
+SYSLIBS = kernel32.lib\r
+USRLIBS = liblfds611.lib\r
+\r
+##### tools #####\r
+MAKE = make\r
+MFLAGS = \r
+\r
+CC = cl\r
+CFBASE = /nologo /W4 /WX /c "-I$(SRCDIR)" "-I$(INCLUDE)" "-I$(INCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /D UNICODE /D _UNICODE /DWIN32_LEAN_AND_MEAN /D_CRT_SECURE_NO_WARNINGS\r
+CFREL = /Ox /DNDEBUG /MT\r
+CFDBG = /Od /Gm /Zi /D_DEBUG /MTd\r
+\r
+LD = link\r
+LFBASE = "/libpath:$(LIB)" "/libpath:$(LIBDIR)" /nologo /subsystem:console /nodefaultlib /nxcompat /wx\r
+LFREL = /incremental:no\r
+LFDBG = /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"\r
+\r
+##### variants #####\r
+CFLAGS = $(CFBASE) $(CFDBG)\r
+LFLAGS = $(LFBASE) $(LFDBG)\r
+CLIB = libcmtd.lib\r
+\r
+ifeq ($(MAKECMDGOALS),rel)\r
+ CFLAGS = $(CFBASE) $(CFREL)\r
+ LFLAGS = $(LFBASE) $(LFREL)\r
+ CLIB = libcmt.lib\r
+endif\r
+\r
+##### search paths #####\r
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))\r
+\r
+##### implicit rules #####\r
+$(OBJDIR)/%.obj : %.c\r
+ $(CC) $(CFLAGS) "/Fo$@" $<\r
+\r
+##### explicit rules #####\r
+$(BINARY) : $(OBJECTS)\r
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)\r
+\r
+##### phony #####\r
+.PHONY : clean rel dbg\r
+\r
+clean : \r
+ @erase /Q $(OBJDIR)\*.obj $(BINDIR)\$(BINNAME).* $(QUIETLY)\r
+\r
+rel : $(BINARY)\r
+dbg : $(BINARY)\r
+\r
--- /dev/null
+/***** defines *****/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ #include <windows.h>\r
+ typedef HANDLE thread_state_t;\r
+ typedef DWORD thread_return_t;\r
+ #define CALLING_CONVENTION WINAPI\r
+#endif\r
+\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+ /* TRD : any Windows (kernel-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ #include <wdm.h>\r
+ typedef HANDLE thread_state_t;\r
+ typedef VOID thread_return_t;\r
+ #define CALLING_CONVENTION \r
+#endif\r
+\r
+#if (defined __unix__ && defined __GNUC__)\r
+ /* TRD : any UNIX on any CPU with GCC\r
+\r
+ __unix__ indicates Solaris, Linux, HPUX, etc\r
+ __GNUC__ indicates GCC\r
+ */\r
+ #include <unistd.h>\r
+ #include <pthread.h>\r
+ #include <sched.h>\r
+ typedef pthread_t thread_state_t;\r
+ typedef void * thread_return_t;\r
+ #define CALLING_CONVENTION \r
+#endif\r
+\r
+typedef thread_return_t (CALLING_CONVENTION *thread_function_t)( void *thread_user_state );\r
+\r
+/***** public prototypes *****/\r
+unsigned int abstraction_cpu_count( void );\r
+int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state );\r
+void abstraction_thread_wait( thread_state_t thread_state );\r
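+\r
+/* TRD : an illustrative sketch (not part of the library) of how these\r
+         prototypes are used by the test and benchmark code; the names\r
+         "worker" and "ts" are hypothetical\r
+\r
+         static thread_return_t CALLING_CONVENTION worker( void *thread_user_state );\r
+\r
+         thread_state_t\r
+           ts;\r
+\r
+         abstraction_thread_start( &ts, 0, worker, NULL );\r
+         abstraction_thread_wait( ts );\r
+*/\r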
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ unsigned int abstraction_cpu_count()\r
+ {\r
+ SYSTEM_INFO\r
+ si;\r
+\r
+ GetNativeSystemInfo( &si );\r
+\r
+ return( (unsigned int) si.dwNumberOfProcessors );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ unsigned int abstraction_cpu_count()\r
+ {\r
+ unsigned int\r
+ active_processor_count;\r
+\r
+ active_processor_count = KeQueryActiveProcessorCount( NULL );\r
+\r
+ return( active_processor_count );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __linux__ && defined __GNUC__)\r
+\r
+ /* TRD : Linux on any CPU with GCC\r
+\r
+ I believe this function is Linux specific and varies by UNIX flavour\r
+\r
+ __linux__ indicates Linux\r
+ __GNUC__ indicates GCC\r
+ */\r
+\r
+ unsigned int abstraction_cpu_count()\r
+ {\r
+ long int\r
+ cpu_count;\r
+\r
+ cpu_count = sysconf( _SC_NPROCESSORS_ONLN );\r
+\r
+ return( (unsigned int) cpu_count );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )\r
+ {\r
+ int\r
+ rv = 0;\r
+\r
+ DWORD\r
+ thread_id;\r
+\r
+ DWORD_PTR\r
+ affinity_mask,\r
+ result;\r
+\r
+ assert( thread_state != NULL );\r
+ // TRD : cpu can be any value in its range\r
+ assert( thread_function != NULL );\r
+ // TRD : thread_user_state can be NULL\r
+\r
+ affinity_mask = (DWORD_PTR) 1 << cpu;\r
+\r
+ *thread_state = CreateThread( NULL, 0, thread_function, thread_user_state, NO_FLAGS, &thread_id );\r
+\r
+ result = SetThreadAffinityMask( *thread_state, affinity_mask );\r
+\r
+ if( *thread_state != NULL and result != 0 )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )\r
+ {\r
+ int\r
+ rv = 0;\r
+\r
+ KAFFINITY\r
+ affinity_mask;\r
+\r
+ NTSTATUS\r
+ nts_create,\r
+ nts_affinity;\r
+\r
+ assert( thread_state != NULL );\r
+ // TRD : cpu can be any value in its range\r
+ assert( thread_function != NULL );\r
+ // TRD : thread_user_state can be NULL\r
+\r
+ affinity_mask = (KAFFINITY) 1 << cpu;\r
+\r
+ nts_create = PsCreateSystemThread( thread_state, THREAD_ALL_ACCESS, NULL, NULL, NULL, thread_function, thread_user_state );\r
+\r
+ nts_affinity = ZwSetInformationThread( *thread_state, ThreadAffinityMask, &affinity_mask, sizeof(KAFFINITY) );\r
+\r
+ if( nts_create == STATUS_SUCCESS and nts_affinity == STATUS_SUCCESS )\r
+ rv = 1;\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __unix__)\r
+\r
+ /* TRD : any UNIX on any CPU with any compiler\r
+\r
+ I assumed pthreads is available on any UNIX.\r
+\r
+ __unix__ indicates Solaris, Linux, HPUX, etc\r
+ */\r
+\r
+ int abstraction_thread_start( thread_state_t *thread_state, unsigned int cpu, thread_function_t thread_function, void *thread_user_state )\r
+ {\r
+ int\r
+ rv = 0,\r
+ rv_create;\r
+\r
+ pthread_attr_t\r
+ attr;\r
+\r
+ cpu_set_t\r
+ cpuset;\r
+\r
+ assert( thread_state != NULL );\r
+ // TRD : cpu can be any value in its range\r
+ assert( thread_function != NULL );\r
+ // TRD : thread_user_state can be NULL\r
+\r
+ pthread_attr_init( &attr );\r
+\r
+ CPU_ZERO( &cpuset );\r
+ CPU_SET( cpu, &cpuset );\r
+ pthread_attr_setaffinity_np( &attr, sizeof(cpuset), &cpuset );\r
+\r
+ rv_create = pthread_create( thread_state, &attr, thread_function, thread_user_state );\r
+\r
+ if( rv_create == 0 )\r
+ rv = 1;\r
+\r
+ pthread_attr_destroy( &attr );\r
+\r
+ return( rv );\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && !defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows (user-mode) on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ !WIN_KERNEL_BUILD indicates Windows user-mode\r
+ */\r
+\r
+ void abstraction_thread_wait( thread_state_t thread_state )\r
+ {\r
+ WaitForSingleObject( thread_state, INFINITE );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined _WIN32 && defined _MSC_VER && defined WIN_KERNEL_BUILD)\r
+\r
+ /* TRD : any Windows on any CPU with the Microsoft C compiler\r
+\r
+ _WIN32 indicates 64-bit or 32-bit Windows\r
+ _MSC_VER indicates Microsoft C compiler\r
+ WIN_KERNEL_BUILD indicates Windows kernel\r
+ */\r
+\r
+ void abstraction_thread_wait( thread_state_t thread_state )\r
+ {\r
+ KeWaitForSingleObject( thread_state, Executive, KernelMode, FALSE, NULL );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#if (defined __unix__)\r
+\r
+ /* TRD : any UNIX on any CPU with any compiler\r
+\r
+ I assumed pthreads is available on any UNIX.\r
+\r
+ __unix__ indicates Solaris, Linux, HPUX, etc\r
+ */\r
+\r
+ void abstraction_thread_wait( thread_state_t thread_state )\r
+ {\r
+ pthread_join( thread_state, NULL );\r
+\r
+ return;\r
+ }\r
+\r
+#endif\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void benchmark_lfds611_freelist( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ thread_count,\r
+ cpu_count;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+\r
+ struct lfds611_freelist_benchmark\r
+ *fb;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ lfds611_atom_t\r
+ total_operations_for_full_test_for_all_cpus,\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;\r
+\r
+ double\r
+ mean_operations_per_second_per_cpu,\r
+ difference_per_second_per_cpu,\r
+ total_difference_per_second_per_cpu,\r
+ std_dev_per_second_per_cpu,\r
+ scalability;\r
+\r
+ /* TRD : here we benchmark the freelist\r
+\r
+ the benchmark is to have a single freelist\r
+ where a worker thread busy-works popping and then pushing\r
+ */\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ fb = (struct lfds611_freelist_benchmark *) malloc( sizeof(struct lfds611_freelist_benchmark) * cpu_count );\r
+\r
+ // TRD : print the benchmark ID and CSV header\r
+ printf( "\n"\r
+ "Release %s Freelist Benchmark #1\n"\r
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS611_RELEASE_NUMBER_STRING );\r
+\r
+ // TRD : we run CPU count times for scalability\r
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )\r
+ {\r
+ // TRD : initialisation\r
+ lfds611_freelist_new( &fs, 1000, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (fb+loop)->fs = fs;\r
+ (fb+loop)->operation_count = 0;\r
+ }\r
+\r
+ // TRD : main test\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds611_freelist_thread_pop_and_push, fb+loop );\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ // TRD : post test math\r
+ total_operations_for_full_test_for_all_cpus = 0;\r
+ total_difference_per_second_per_cpu = 0;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ total_operations_for_full_test_for_all_cpus += (fb+loop)->operation_count;\r
+\r
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;\r
+\r
+ if( thread_count == 1 )\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ {\r
+ difference_per_second_per_cpu = ((double) (fb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;\r
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;\r
+ }\r
+\r
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );\r
+\r
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);\r
+\r
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );\r
+\r
+ // TRD : cleanup\r
+ lfds611_freelist_delete( fs, NULL, NULL );\r
+ }\r
+\r
+ free( fb );\r
+\r
+ free( thread_handles );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION benchmark_lfds611_freelist_thread_pop_and_push( void *freelist_benchmark )\r
+{\r
+ struct lfds611_freelist_benchmark\r
+ *fb;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( freelist_benchmark != NULL );\r
+\r
+ fb = (struct lfds611_freelist_benchmark *) freelist_benchmark;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_freelist_pop( fb->fs, &fe );\r
+ lfds611_freelist_push( fb->fs, fe );\r
+\r
+ fb->operation_count += 2;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void benchmark_lfds611_queue( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ thread_count,\r
+ cpu_count;\r
+\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ struct lfds611_queue_benchmark\r
+ *qb;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ lfds611_atom_t\r
+ total_operations_for_full_test_for_all_cpus,\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;\r
+\r
+ double\r
+ mean_operations_per_second_per_cpu,\r
+ difference_per_second_per_cpu,\r
+ total_difference_per_second_per_cpu,\r
+ std_dev_per_second_per_cpu,\r
+ scalability;\r
+\r
+ /* TRD : here we benchmark the queue\r
+\r
+ the benchmark is to have a single queue\r
+ where a worker thread busy-works dequeuing and then queuing\r
+ */\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ qb = (struct lfds611_queue_benchmark *) malloc( sizeof(struct lfds611_queue_benchmark) * cpu_count );\r
+\r
+ // TRD : print the benchmark ID and CSV header\r
+ printf( "\n"\r
+ "Release %s Queue Benchmark #1\n"\r
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS611_RELEASE_NUMBER_STRING );\r
+\r
+ // TRD : we run CPU count times for scalability\r
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )\r
+ {\r
+ // TRD : initialisation\r
+ lfds611_queue_new( &qs, 1000 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qb+loop)->qs = qs;\r
+ (qb+loop)->operation_count = 0;\r
+ }\r
+\r
+ // TRD : populate the queue (we don't actually use the user data)\r
+ for( loop = 0 ; loop < 500 ; loop++ )\r
+ lfds611_queue_enqueue( qs, (void *) (lfds611_atom_t) loop );\r
+\r
+ // TRD : main test\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds611_queue_thread_delfds611_queue_and_enqueue, qb+loop );\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ // TRD : post test math\r
+ total_operations_for_full_test_for_all_cpus = 0;\r
+ total_difference_per_second_per_cpu = 0;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ total_operations_for_full_test_for_all_cpus += (qb+loop)->operation_count;\r
+\r
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;\r
+\r
+ if( thread_count == 1 )\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ {\r
+ difference_per_second_per_cpu = ((double) (qb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;\r
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;\r
+ }\r
+\r
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );\r
+\r
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);\r
+\r
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );\r
+\r
+ // TRD : cleanup\r
+ lfds611_queue_delete( qs, NULL, NULL );\r
+ }\r
+\r
+ free( qb );\r
+\r
+ free( thread_handles );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION benchmark_lfds611_queue_thread_delfds611_queue_and_enqueue( void *queue_benchmark )\r
+{\r
+ struct lfds611_queue_benchmark\r
+ *qb;\r
+\r
+ void\r
+ *user_data;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( queue_benchmark != NULL );\r
+\r
+ qb = (struct lfds611_queue_benchmark *) queue_benchmark;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_queue_dequeue( qb->qs, &user_data );\r
+ lfds611_queue_enqueue( qb->qs, user_data );\r
+\r
+ qb->operation_count += 2;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void benchmark_lfds611_ringbuffer( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ thread_count,\r
+ cpu_count;\r
+\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ struct lfds611_ringbuffer_benchmark\r
+ *rb;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ lfds611_atom_t\r
+ total_operations_for_full_test_for_all_cpus,\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;\r
+\r
+ double\r
+ mean_operations_per_second_per_cpu,\r
+ difference_per_second_per_cpu,\r
+ total_difference_per_second_per_cpu,\r
+ std_dev_per_second_per_cpu,\r
+ scalability;\r
+\r
+ /* TRD : here we benchmark the ringbuffer\r
+\r
+ the benchmark is to have a single ringbuffer\r
+ where a worker thread busy-works writing and then reading\r
+ */\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ rb = (struct lfds611_ringbuffer_benchmark *) malloc( sizeof(struct lfds611_ringbuffer_benchmark) * cpu_count );\r
+\r
+ // TRD : print the benchmark ID and CSV header\r
+ printf( "\n"\r
+ "Release %s Ringbuffer Benchmark #1\n"\r
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS611_RELEASE_NUMBER_STRING );\r
+\r
+ // TRD : we run CPU count times for scalability\r
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )\r
+ {\r
+ // TRD : initialisation\r
+ lfds611_ringbuffer_new( &rs, 1000, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rb+loop)->rs = rs;\r
+ (rb+loop)->operation_count = 0;\r
+ }\r
+\r
+ // TRD : main test\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds611_ringbuffer_thread_write_and_read, rb+loop );\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ // TRD : post test math\r
+ total_operations_for_full_test_for_all_cpus = 0;\r
+ total_difference_per_second_per_cpu = 0;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ total_operations_for_full_test_for_all_cpus += (rb+loop)->operation_count;\r
+\r
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;\r
+\r
+ if( thread_count == 1 )\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ {\r
+ difference_per_second_per_cpu = ((double) (rb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;\r
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;\r
+ }\r
+\r
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );\r
+\r
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);\r
+\r
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );\r
+\r
+ // TRD : cleanup\r
+ lfds611_ringbuffer_delete( rs, NULL, NULL );\r
+ }\r
+\r
+ free( rb );\r
+\r
+ free( thread_handles );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION benchmark_lfds611_ringbuffer_thread_write_and_read( void *ringbuffer_benchmark )\r
+{\r
+ struct lfds611_ringbuffer_benchmark\r
+ *rb;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( ringbuffer_benchmark != NULL );\r
+\r
+ rb = (struct lfds611_ringbuffer_benchmark *) ringbuffer_benchmark;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_ringbuffer_get_write_element( rb->rs, &fe, NULL );\r
+ lfds611_ringbuffer_put_write_element( rb->rs, fe );\r
+\r
+ lfds611_ringbuffer_get_read_element( rb->rs, &fe );\r
+ lfds611_ringbuffer_put_read_element( rb->rs, fe );\r
+\r
+ rb->operation_count += 2;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void benchmark_lfds611_stack( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ thread_count,\r
+ cpu_count;\r
+\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ struct lfds611_stack_benchmark\r
+ *sb;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ lfds611_atom_t\r
+ total_operations_for_full_test_for_all_cpus,\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = 0;\r
+\r
+ double\r
+ mean_operations_per_second_per_cpu,\r
+ difference_per_second_per_cpu,\r
+ total_difference_per_second_per_cpu,\r
+ std_dev_per_second_per_cpu,\r
+ scalability;\r
+\r
+ /* TRD : here we benchmark the stack\r
+\r
+ the benchmark is to have a single stack\r
+ where a worker thread busy-works pushing then popping\r
+ */\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ thread_handles = (thread_state_t *) malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ sb = (struct lfds611_stack_benchmark *) malloc( sizeof(struct lfds611_stack_benchmark) * cpu_count );\r
+\r
+ // TRD : print the benchmark ID and CSV header\r
+ printf( "\n"\r
+ "Release %s Stack Benchmark #1\n"\r
+ "CPUs,total ops,mean ops/sec per CPU,standard deviation,scalability\n", LFDS611_RELEASE_NUMBER_STRING );\r
+\r
+ // TRD : we run CPU count times for scalability\r
+ for( thread_count = 1 ; thread_count <= cpu_count ; thread_count++ )\r
+ {\r
+ // TRD : initialisation\r
+ lfds611_stack_new( &ss, 1000 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (sb+loop)->ss = ss;\r
+ (sb+loop)->operation_count = 0;\r
+ }\r
+\r
+ // TRD : main test\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, benchmark_lfds611_stack_thread_push_and_pop, sb+loop );\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ // TRD : post test math\r
+ total_operations_for_full_test_for_all_cpus = 0;\r
+ total_difference_per_second_per_cpu = 0;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ total_operations_for_full_test_for_all_cpus += (sb+loop)->operation_count;\r
+\r
+ mean_operations_per_second_per_cpu = ((double) total_operations_for_full_test_for_all_cpus / (double) thread_count) / (double) 10;\r
+\r
+ if( thread_count == 1 )\r
+ total_operations_for_full_test_for_all_cpus_for_one_cpu = total_operations_for_full_test_for_all_cpus;\r
+\r
+ for( loop = 0 ; loop < thread_count ; loop++ )\r
+ {\r
+ difference_per_second_per_cpu = ((double) (sb+loop)->operation_count / (double) 10) - mean_operations_per_second_per_cpu;\r
+ total_difference_per_second_per_cpu += difference_per_second_per_cpu * difference_per_second_per_cpu;\r
+ }\r
+\r
+ std_dev_per_second_per_cpu = sqrt( (double) total_difference_per_second_per_cpu );\r
+\r
+ scalability = (double) total_operations_for_full_test_for_all_cpus / (double) (total_operations_for_full_test_for_all_cpus_for_one_cpu * thread_count);\r
+\r
+ printf( "%u,%u,%.0f,%.0f,%0.2f\n", thread_count, (unsigned int) total_operations_for_full_test_for_all_cpus, mean_operations_per_second_per_cpu, std_dev_per_second_per_cpu, scalability );\r
+\r
+ // TRD : cleanup\r
+ lfds611_stack_delete( ss, NULL, NULL );\r
+ }\r
+\r
+ free( sb );\r
+\r
+ free( thread_handles );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION benchmark_lfds611_stack_thread_push_and_pop( void *stack_benchmark )\r
+{\r
+ struct lfds611_stack_benchmark\r
+ *sb;\r
+\r
+ void\r
+ *user_data = NULL;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( stack_benchmark != NULL );\r
+\r
+ sb = (struct lfds611_stack_benchmark *) stack_benchmark;\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_stack_push( sb->ss, user_data );\r
+ lfds611_stack_pop( sb->ss, &user_data );\r
+\r
+ sb->operation_count += 2;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+/***** ANSI includes *****/\r
+/* TRD : _GNU_SOURCE is required by sched.h for pthread_attr_setaffinity_np, CPU_ZERO and CPU_SET\r
+ however it has to be defined very early as even the ANSI headers pull in stuff\r
+ which uses _GNU_SOURCE and which I think must be protected against multiple inclusion,\r
+ which basically means if you set it too late, it's not seen, because the headers\r
+ have already been parsed with _GNU_SOURCE unset\r
+*/\r
+\r
+#define _GNU_SOURCE\r
+\r
+#include <assert.h>\r
+#include <math.h>\r
+#include <stdio.h>\r
+#include <stdarg.h>\r
+#include <stdlib.h>\r
+#include <string.h>\r
+#include <time.h>\r
+\r
+/***** internal includes *****/\r
+#include "abstraction.h"\r
+\r
+/***** external includes *****/\r
+#include "liblfds611.h"\r
+\r
+/***** defines *****/\r
+#define and &&\r
+#define or ||\r
+\r
+#define RAISED 1\r
+#define LOWERED 0\r
+\r
+#define NO_FLAGS 0x0\r
+\r
+/***** enums *****/\r
+enum lfds611_test_operation\r
+{\r
+ UNKNOWN,\r
+ HELP,\r
+ TEST,\r
+ BENCHMARK\r
+};\r
+\r
+/***** structs *****/\r
+#include "structures.h"\r
+\r
+/***** prototypes *****/\r
+int main( int argc, char **argv );\r
+\r
+void internal_display_test_name( char *test_name );\r
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... );\r
+void internal_display_lfds611_data_structure_validity( enum lfds611_data_structure_validity dvs );\r
+\r
+void benchmark_lfds611_freelist( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds611_freelist_thread_pop_and_push( void *freelist_benchmark );\r
+\r
+void benchmark_lfds611_queue( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds611_queue_thread_delfds611_queue_and_enqueue( void *queue_benchmark );\r
+\r
+void benchmark_lfds611_ringbuffer( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds611_ringbuffer_thread_write_and_read( void *ringbuffer_benchmark );\r
+\r
+void benchmark_lfds611_stack( void );\r
+ thread_return_t CALLING_CONVENTION benchmark_lfds611_stack_thread_push_and_pop( void *stack_benchmark );\r
+\r
+void test_lfds611_abstraction( void );\r
+ void abstraction_test_increment( void );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter );\r
+ void abstraction_test_cas( void );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_cas( void *abstraction_test_cas_state );\r
+ void abstraction_test_dcas( void );\r
+ thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state );\r
+\r
+void test_lfds611_freelist( void );\r
+ void freelist_test_internal_popping( void );\r
+ int freelist_test_internal_popping_init( void **user_data, void *user_state );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state );\r
+ void freelist_test_internal_pushing( void );\r
+ int freelist_test_internal_pushing_init( void **user_data, void *user_state );\r
+ void freelist_test_internal_pushing_delete( void *user_data, void *user_state );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state );\r
+ void freelist_test_internal_popping_and_pushing( void );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state );\r
+ void freelist_test_internal_rapid_popping_and_pushing( void );\r
+ thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds611_freelist_state );\r
+\r
+void test_lfds611_queue( void );\r
+ void queue_test_enqueuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state );\r
+ void queue_test_dequeuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state );\r
+ void queue_test_enqueuing_and_dequeuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );\r
+ void queue_test_rapid_enqueuing_and_dequeuing( void );\r
+ thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state );\r
+\r
+void test_lfds611_ringbuffer( void );\r
+ void ringbuffer_test_reading( void );\r
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state );\r
+ void ringbuffer_test_writing( void );\r
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state );\r
+ void ringbuffer_test_reading_and_writing( void );\r
+ thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state );\r
+\r
+void test_lfds611_slist( void );\r
+ void test_slist_new_delete_get( void );\r
+ thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_new_head_and_next( void *slist_test_state );\r
+ thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_delete_and_get( void *slist_test_state );\r
+ void test_slist_get_set_user_data( void );\r
+ thread_return_t CALLING_CONVENTION slist_test_internal_thread_get_set_user_data( void *slist_test_state );\r
+ void test_slist_delete_all_elements( void );\r
+\r
+void test_lfds611_stack( void );\r
+ void stack_test_internal_popping( void );\r
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping( void *stack_test_popping_state );\r
+ void stack_test_internal_pushing( void );\r
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_pushing( void *stack_test_pushing_state );\r
+ void stack_test_internal_popping_and_pushing( void );\r
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_popping( void *stack_test_popping_and_pushing_state );\r
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_pushing( void *stack_test_popping_and_pushing_state );\r
+ void stack_test_internal_rapid_popping_and_pushing( void );\r
+ thread_return_t CALLING_CONVENTION stack_test_internal_thread_rapid_popping_and_pushing( void *stack_state );\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int main( int argc, char **argv )\r
+{\r
+ enum lfds611_test_operation\r
+ operation = UNKNOWN;\r
+\r
+ unsigned int\r
+ loop,\r
+ iterations = 1;\r
+\r
+ assert( argc >= 1 );\r
+ assert( argv != NULL );\r
+\r
+ if( argc == 1 or argc >= 4 )\r
+ operation = HELP;\r
+\r
+ if( operation == UNKNOWN )\r
+ {\r
+ if( 0 == strcmp(*(argv+1), "test") )\r
+ {\r
+ operation = TEST;\r
+\r
+ // TRD : sscanf() may fail, but iterations is initialised to 1, so it's okay\r
+ if( argc == 3 )\r
+ sscanf( *(argv+2), "%u", &iterations );\r
+ }\r
+\r
+ if( 0 == strcmp(*(argv+1), "benchmark") )\r
+ {\r
+ operation = BENCHMARK;\r
+\r
+ // TRD : sscanf() may fail, but iterations is initialised to 1, so it's okay\r
+ if( argc == 3 )\r
+ sscanf( *(argv+2), "%u", &iterations );\r
+ }\r
+ }\r
+\r
+ switch( operation )\r
+ {\r
+ case UNKNOWN:\r
+ case HELP:\r
+ printf( "test [test|benchmark] [iterations]\n"\r
+ " test : run the test suite\n"\r
+ " benchmark : run the benchmark suite\n"\r
+ " iterations : optional, default is 1\n" );\r
+ break;\r
+\r
+ case TEST:\r
+ for( loop = 1 ; loop < iterations+1 ; loop++ )\r
+ {\r
+ printf( "\n"\r
+ "Test Iteration %02u\n"\r
+ "=================\n", loop );\r
+\r
+ test_lfds611_abstraction();\r
+ test_lfds611_freelist();\r
+ test_lfds611_queue();\r
+ test_lfds611_ringbuffer();\r
+ test_lfds611_slist();\r
+ test_lfds611_stack();\r
+ }\r
+ break;\r
+\r
+ case BENCHMARK:\r
+ for( loop = 1 ; loop < iterations+1 ; loop++ )\r
+ {\r
+ printf( "\n"\r
+ "Benchmark Iteration %02u\n"\r
+ "========================\n", loop );\r
+\r
+ benchmark_lfds611_freelist();\r
+ benchmark_lfds611_queue();\r
+ benchmark_lfds611_ringbuffer();\r
+ benchmark_lfds611_stack();\r
+ }\r
+ break;\r
+ }\r
+\r
+ return( EXIT_SUCCESS );\r
+}\r
+\r
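+/* usage sketch (illustrative only; the binary name depends on the platform\r
+   and build target, e.g. "test.exe" for the Windows builds):\r
+\r
+     test                 print the help text\r
+     test test            run the test suite once\r
+     test test 5          run the test suite five times\r
+     test benchmark 3     run the benchmark suite three times\r
+*/\r
+\r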
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void internal_display_test_name( char *test_name )\r
+{\r
+ assert( test_name != NULL );\r
+\r
+ printf( "%s...", test_name );\r
+ fflush( stdout );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void internal_display_test_result( unsigned int number_name_dvs_pairs, ... )\r
+{\r
+ va_list\r
+ va;\r
+\r
+ int\r
+ passed_flag = RAISED;\r
+\r
+ unsigned int\r
+ loop;\r
+\r
+ char\r
+ *name;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs;\r
+\r
+ // TRD : number_name_dvs_pairs can be any value in its range\r
+\r
+ va_start( va, number_name_dvs_pairs );\r
+\r
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )\r
+ {\r
+ name = va_arg( va, char * );\r
+ dvs = va_arg( va, enum lfds611_data_structure_validity );\r
+\r
+ if( dvs != LFDS611_VALIDITY_VALID )\r
+ {\r
+ passed_flag = LOWERED;\r
+ break;\r
+ }\r
+ }\r
+\r
+ va_end( va );\r
+\r
+ if( passed_flag == RAISED )\r
+ puts( "passed" );\r
+\r
+ if( passed_flag == LOWERED )\r
+ {\r
+ printf( "failed (" );\r
+\r
+ va_start( va, number_name_dvs_pairs );\r
+\r
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )\r
+ {\r
+ name = va_arg( va, char * );\r
+ dvs = va_arg( va, enum lfds611_data_structure_validity );\r
+\r
+ printf( "%s ", name );\r
+ internal_display_lfds611_data_structure_validity( dvs );\r
+\r
+ if( loop+1 < number_name_dvs_pairs )\r
+ printf( ", " );\r
+ }\r
+\r
+ va_end( va );\r
+\r
+ printf( ")\n" );\r
+ }\r
+\r
+ return;\r
+}\r
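+\r
+/* an illustrative call, of the form used by the test functions in this\r
+   suite; "queue" and "queue freelist" are display names and dvs[0], dvs[1]\r
+   are validity results obtained from the data structure's query function:\r
+\r
+     internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+*/\r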
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void internal_display_lfds611_data_structure_validity( enum lfds611_data_structure_validity dvs )\r
+{\r
+ char\r
+ *string = NULL;\r
+\r
+ switch( dvs )\r
+ {\r
+ case LFDS611_VALIDITY_VALID:\r
+ string = "valid";\r
+ break;\r
+\r
+ case LFDS611_VALIDITY_INVALID_LOOP:\r
+ string = "invalid - loop detected";\r
+ break;\r
+\r
+ case LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS:\r
+ string = "invalid - missing elements";\r
+ break;\r
+\r
+ case LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS:\r
+ string = "invalid - additional elements";\r
+ break;\r
+\r
+ case LFDS611_VALIDITY_INVALID_TEST_DATA:\r
+ string = "invalid - invalid test data";\r
+ break;\r
+ }\r
+\r
+ printf( "%s", string );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+/***** structs *****/\r
+#pragma pack( push, LFDS611_ALIGN_DOUBLE_POINTER )\r
+\r
+/***** abstraction tests *****/\r
+struct abstraction_test_cas_state\r
+{\r
+ volatile lfds611_atom_t\r
+ *shared_counter;\r
+\r
+ lfds611_atom_t\r
+ local_counter;\r
+};\r
+\r
+struct abstraction_test_dcas_state\r
+{\r
+ volatile lfds611_atom_t\r
+ *shared_counter;\r
+\r
+ lfds611_atom_t\r
+ local_counter;\r
+};\r
+\r
+/***** freelist tests *****/\r
+struct freelist_test_popping_state\r
+{\r
+ struct lfds611_freelist_state\r
+ *fs,\r
+ *fs_thread_local;\r
+};\r
+\r
+struct freelist_test_pushing_state\r
+{\r
+ lfds611_atom_t\r
+ *count,\r
+ thread_number;\r
+\r
+ struct lfds611_freelist_state\r
+ *source_fs,\r
+ *fs;\r
+};\r
+\r
+struct freelist_test_popping_and_pushing_state\r
+{\r
+ struct lfds611_freelist_state\r
+ *local_fs,\r
+ *fs;\r
+};\r
+\r
+struct freelist_test_counter_and_thread_number\r
+{\r
+ lfds611_atom_t\r
+ thread_number;\r
+\r
+ unsigned long long int\r
+ counter;\r
+};\r
+\r
+/***** queue tests *****/\r
+struct queue_test_enqueuing_state\r
+{\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ lfds611_atom_t\r
+ counter;\r
+};\r
+\r
+struct queue_test_dequeuing_state\r
+{\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ int\r
+ error_flag;\r
+};\r
+\r
+struct queue_test_enqueuing_and_dequeuing_state\r
+{\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ lfds611_atom_t\r
+ counter,\r
+ thread_number,\r
+ *per_thread_counters;\r
+\r
+ unsigned int\r
+ cpu_count;\r
+\r
+ int\r
+ error_flag;\r
+};\r
+\r
+struct queue_test_rapid_enqueuing_and_dequeuing_state\r
+{\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ lfds611_atom_t\r
+ counter;\r
+};\r
+\r
+/***** ringbuffer tests *****/\r
+struct ringbuffer_test_reading_state\r
+{\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ int\r
+ error_flag;\r
+\r
+ lfds611_atom_t\r
+ read_count;\r
+};\r
+\r
+struct ringbuffer_test_writing_state\r
+{\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ lfds611_atom_t\r
+ write_count;\r
+};\r
+\r
+struct ringbuffer_test_reading_and_writing_state\r
+{\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ lfds611_atom_t\r
+ counter,\r
+ *per_thread_counters;\r
+\r
+ unsigned int\r
+ cpu_count;\r
+\r
+ int\r
+ error_flag;\r
+};\r
+\r
+/***** slist tests *****/\r
+struct slist_test_state\r
+{\r
+ struct lfds611_slist_state\r
+ *ss;\r
+\r
+ size_t\r
+ create_count,\r
+ delete_count;\r
+\r
+ lfds611_atom_t\r
+ thread_and_count;\r
+};\r
+\r
+/***** stack tests *****/\r
+struct stack_test_popping_state\r
+{\r
+ struct lfds611_stack_state\r
+ *ss,\r
+ *ss_thread_local;\r
+};\r
+\r
+struct stack_test_pushing_state\r
+{\r
+ lfds611_atom_t\r
+ thread_number;\r
+\r
+ struct lfds611_stack_state\r
+ *ss;\r
+};\r
+\r
+struct stack_test_popping_and_pushing_state\r
+{\r
+ struct lfds611_stack_state\r
+ *local_ss,\r
+ *ss;\r
+};\r
+\r
+struct stack_test_counter_and_thread_number\r
+{\r
+ lfds611_atom_t\r
+ thread_number,\r
+ counter;\r
+};\r
+\r
+/***** freelist benchmarks *****/\r
+struct lfds611_freelist_benchmark\r
+{\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+\r
+ lfds611_atom_t\r
+ operation_count;\r
+};\r
+\r
+/***** queue benchmarks *****/\r
+struct lfds611_queue_benchmark\r
+{\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ lfds611_atom_t\r
+ operation_count;\r
+};\r
+\r
+/***** ringbuffer benchmarks *****/\r
+struct lfds611_ringbuffer_benchmark\r
+{\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ lfds611_atom_t\r
+ operation_count;\r
+};\r
+\r
+/***** stack benchmarks *****/\r
+struct lfds611_stack_benchmark\r
+{\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ lfds611_atom_t\r
+ operation_count;\r
+};\r
+\r
+#pragma pack( pop )\r
+\r
--- /dev/null
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds611_abstraction( void )
+{
+ printf( "\n"
+ "Abstraction Tests\n"
+ "=================\n" );
+
+ abstraction_test_increment();
+ abstraction_test_cas();
+ abstraction_test_dcas();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_increment( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) volatile lfds611_atom_t
+ shared_counter,
+ atomic_shared_counter;
+
+ /* TRD : here we test lfds611_abstraction_increment
+
+ first, we run one thread per CPU where each thread increments
+ a shared counter 10,000,000 times - however, this first test
+ does NOT use atomic increment; it uses "++"
+
+ second, we repeat the exercise, but this time using
+ lfds611_abstraction_increment()
+
+ if the final value in the first test is less than (10,000,000*cpu_count)
+ then the system is sensitive to non-atomic increments; this means if
+ our atomic version of the test passes, we can have some degree of confidence
+ that it works
+
+ if the final value in the first test is in fact correct, then we can't know
+ that our atomic version has changed anything
+
+ and of course if the final value in the atomic test is wrong, we know things
+ are broken
+ */
+
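+ /* a sketch (an assumption, not the library's actual code) of the loop each
+    helper below is expected to run; the real helpers live in the liblfds
+    source, this only illustrates the difference being tested (the atomic
+    helper's exact signature is assumed):
+
+      for( loop = 0 ; loop < 10000000 ; loop++ )
+        (*counter)++;                                  // non-atomic helper
+
+      for( loop = 0 ; loop < 10000000 ; loop++ )
+        lfds611_abstraction_increment( counter );      // atomic helper
+ */
+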
+ internal_display_test_name( "Atomic increment" );
+
+ cpu_count = abstraction_cpu_count();
+
+ shared_counter = 0;
+ atomic_shared_counter = 0;
+
+ LFDS611_BARRIER_STORE;
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ // TRD : non-atomic
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_increment, (void *) &shared_counter );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ // TRD : atomic
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_atomic_increment, (void *) &atomic_shared_counter );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ if( shared_counter < (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
+ puts( "passed" );
+
+ if( shared_counter == (10000000 * cpu_count) and atomic_shared_counter == (10000000 * cpu_count) )
+ puts( "indeterminate" );
+
+ if( atomic_shared_counter < (10000000 * cpu_count) )
+ puts( "failed" );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_increment( void *shared_counter )
+{
+ assert( shared_counter != NULL );
+
+ LFDS611_BARRIER_LOAD;
+
+ lfds611_liblfds_abstraction_test_helper_increment_non_atomic( shared_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_atomic_increment( void *shared_counter )
+{
+ assert( shared_counter != NULL );
+
+ LFDS611_BARRIER_LOAD;
+
+ lfds611_liblfds_abstraction_test_helper_increment_atomic( shared_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_cas( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct abstraction_test_cas_state
+ *atcs;
+
+ LFDS611_ALIGN(LFDS611_ALIGN_SINGLE_POINTER) volatile lfds611_atom_t
+ shared_counter;
+
+ lfds611_atom_t
+ local_total = 0;
+
+ // TRD : number_logical_processors can be any value in its range
+
+ /* TRD : here we test lfds611_abstraction_cas
+
+ we run one thread per CPU
+ we use lfds611_abstraction_cas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ the threads run for ten seconds
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ internal_display_test_name( "Atomic CAS" );
+
+ cpu_count = abstraction_cpu_count();
+
+ shared_counter = 0;
+
+ LFDS611_BARRIER_STORE;
+
+ atcs = malloc( sizeof(struct abstraction_test_cas_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (atcs+loop)->shared_counter = &shared_counter;
+ (atcs+loop)->local_counter = 0;
+ }
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_cas, atcs+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ local_total += (atcs+loop)->local_counter;
+
+ if( local_total == shared_counter )
+ puts( "passed" );
+
+ if( local_total != shared_counter )
+ puts( "failed" );
+
+ // TRD : cleanup
+ free( atcs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_cas( void *abstraction_test_cas_state )
+{
+ struct abstraction_test_cas_state
+ *atcs;
+
+ assert( abstraction_test_cas_state != NULL );
+
+ atcs = (struct abstraction_test_cas_state *) abstraction_test_cas_state;
+
+ LFDS611_BARRIER_LOAD;
+
+ lfds611_liblfds_abstraction_test_helper_cas( atcs->shared_counter, &atcs->local_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+void abstraction_test_dcas( void )
+{
+ unsigned int
+ loop,
+ cpu_count;
+
+ thread_state_t
+ *thread_handles;
+
+ struct abstraction_test_dcas_state
+ *atds;
+
+ LFDS611_ALIGN(LFDS611_ALIGN_DOUBLE_POINTER) volatile lfds611_atom_t
+ shared_counter[2] = { 0, 0 };
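+ // note : the two adjacent atoms above form the double-width operand for
+ //        the DCAS operation, hence the double-pointer alignment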
+
+ lfds611_atom_t
+ local_total = 0;
+
+ /* TRD : here we test lfds611_abstraction_dcas
+
+ we run one thread per CPU
+ we use lfds611_abstraction_dcas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ the threads run for ten seconds
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ internal_display_test_name( "Atomic DCAS" );
+
+ cpu_count = abstraction_cpu_count();
+
+ atds = malloc( sizeof(struct abstraction_test_dcas_state) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ {
+ (atds+loop)->shared_counter = shared_counter;
+ (atds+loop)->local_counter = 0;
+ }
+
+ LFDS611_BARRIER_STORE;
+
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_start( &thread_handles[loop], loop, abstraction_test_internal_thread_dcas, atds+loop );
+
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ abstraction_thread_wait( thread_handles[loop] );
+
+ free( thread_handles );
+
+ // TRD : results
+ for( loop = 0 ; loop < cpu_count ; loop++ )
+ local_total += (atds+loop)->local_counter;
+
+ if( local_total == shared_counter[0] )
+ puts( "passed" );
+
+ if( local_total != shared_counter[0] )
+ puts( "failed" );
+
+ // TRD : cleanup
+ free( atds );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+thread_return_t CALLING_CONVENTION abstraction_test_internal_thread_dcas( void *abstraction_test_dcas_state )
+{
+ struct abstraction_test_dcas_state
+ *atds;
+
+ assert( abstraction_test_dcas_state != NULL );
+
+ atds = (struct abstraction_test_dcas_state *) abstraction_test_dcas_state;
+
+ LFDS611_BARRIER_LOAD;
+
+ lfds611_liblfds_abstraction_test_helper_dcas( atds->shared_counter, &atds->local_counter );
+
+ return( (thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds611_freelist( void )\r
+{\r
+ printf( "\n"\r
+ "Freelist Tests\n"\r
+ "==============\n" );\r
+\r
+ freelist_test_internal_popping();\r
+ freelist_test_internal_pushing();\r
+ freelist_test_internal_popping_and_pushing();\r
+ freelist_test_internal_rapid_popping_and_pushing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_popping( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ lfds611_atom_t\r
+ count = 0;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs = LFDS611_VALIDITY_VALID;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ struct freelist_test_popping_state\r
+ *ftps;\r
+\r
+ unsigned int\r
+ *found_count;\r
+\r
+ /* TRD : we create a freelist with 1,000,000 elements\r
+\r
+ the creation function runs in a single thread and creates\r
+ and pushes those elements onto the freelist\r
+\r
+ each element contains a void pointer which is its element number\r
+\r
+ we then run one thread per CPU\r
+ where each thread loops, popping as quickly as possible\r
+ each popped element is pushed onto a thread-local freelist\r
+\r
+ the threads run till the source freelist is empty\r
+\r
+ we then check the thread-local freelists\r
+ we should find we have every element\r
+\r
+ then tidy up\r
+ */\r
+\r
+ internal_display_test_name( "Popping" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_freelist_new( &fs, 1000000, freelist_test_internal_popping_init, &count );\r
+ ftps = malloc( sizeof(struct freelist_test_popping_state) * cpu_count );\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (ftps+loop)->fs = fs;\r
+ lfds611_freelist_new( &(ftps+loop)->fs_thread_local, 0, NULL, NULL );\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping, ftps+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now we check the thread-local freelists\r
+ found_count = malloc( sizeof(unsigned int) * 1000000 );\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ *(found_count+loop) = 0;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ while( lfds611_freelist_pop((ftps+loop)->fs_thread_local, &fe) )\r
+ {\r
+ lfds611_freelist_get_user_data_from_element( fe, (void **) &count );\r
+ (*(found_count+count))++;\r
+ lfds611_freelist_push( fs, fe );\r
+ }\r
+ }\r
+\r
+ for( loop = 0 ; loop < 1000000 and dvs == LFDS611_VALIDITY_VALID ; loop++ )\r
+ {\r
+ if( *(found_count+loop) == 0 )\r
+ dvs = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( *(found_count+loop) > 1 )\r
+ dvs = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ // TRD : cleanup\r
+ free( found_count );\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ lfds611_freelist_delete( (ftps+loop)->fs_thread_local, NULL, NULL );\r
+ free( ftps );\r
+ lfds611_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+int freelist_test_internal_popping_init( void **user_data, void *user_state )\r
+{\r
+ lfds611_atom_t\r
+ *count;\r
+\r
+ assert( user_data != NULL );\r
+ assert( user_state != NULL );\r
+\r
+ count = (lfds611_atom_t *) user_state;\r
+\r
+ *(lfds611_atom_t *) user_data = (*count)++;\r
+\r
+ return( 1 );\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping( void *freelist_test_popping_state )\r
+{\r
+ struct freelist_test_popping_state\r
+ *ftps;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ assert( freelist_test_popping_state != NULL );\r
+\r
+ ftps = (struct freelist_test_popping_state *) freelist_test_popping_state;\r
+\r
+ lfds611_freelist_use( ftps->fs );\r
+ lfds611_freelist_use( ftps->fs_thread_local );\r
+\r
+ while( lfds611_freelist_pop(ftps->fs, &fe) )\r
+ lfds611_freelist_push( ftps->fs_thread_local, fe );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ lfds611_atom_t\r
+ count = 0;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs;\r
+\r
+ struct freelist_test_pushing_state\r
+ *ftps;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs,\r
+ *cleanup_fs;\r
+\r
+ struct freelist_test_counter_and_thread_number\r
+ *cnt,\r
+ *counter_and_number_trackers;\r
+\r
+ struct lfds611_validation_info\r
+ vi;\r
+\r
+ /* TRD : we create an empty freelist, which we will push to\r
+\r
+ we then create one freelist per CPU, where each such freelist\r
+ contains 100,000 elements and\r
+ each element is an incrementing counter and a unique thread ID\r
+ (from 0 to the number of CPUs minus one)\r
+\r
+ we then start one thread per CPU, where each thread is\r
+ given one of the populated freelists and pops from that\r
+ to push to the empty freelist\r
+\r
+ the reason for this is to achieve memory pre-allocation\r
+ which allows the pushing threads to run at maximum speed\r
+\r
+ the threads end when their freelists are empty\r
+\r
+ we then fully pop the now populated main freelist (onto\r
+ a second freelist, so we can cleanly free all memory),\r
+ checking that the counts increment on a per unique ID basis\r
+ and that the number of elements we pop equals 100,000 per thread\r
+ (since each element has an incrementing counter which is\r
+ unique on a per unique ID basis, we can know we didn't lose\r
+ any elements)\r
+ */\r
+\r
+ internal_display_test_name( "Pushing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ ftps = malloc( sizeof(struct freelist_test_pushing_state) * cpu_count );\r
+\r
+ lfds611_freelist_new( &fs, 0, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (ftps+loop)->thread_number = (lfds611_atom_t) loop;\r
+ // TRD : note count is shared across threads, so thread 0 is 0-100000, thread 1 is 100000-200000, etc\r
+ (ftps+loop)->count = &count;\r
+ lfds611_freelist_new( &(ftps+loop)->source_fs, 100000, freelist_test_internal_pushing_init, (void *) (ftps+loop) );\r
+ (ftps+loop)->fs = fs;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_pushing, ftps+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now fully pop and verify the main freelist\r
+ lfds611_freelist_new( &cleanup_fs, 0, NULL, NULL );\r
+\r
+ counter_and_number_trackers = malloc( sizeof(struct freelist_test_counter_and_thread_number) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (counter_and_number_trackers+loop)->counter = 100000 * loop;\r
+ (counter_and_number_trackers+loop)->thread_number = (lfds611_atom_t) loop;\r
+ }\r
+\r
+ vi.min_elements = vi.max_elements = 100000 * cpu_count;\r
+\r
+ lfds611_freelist_query( fs, LFDS611_FREELIST_QUERY_VALIDATE, &vi, (void *) &dvs );\r
+\r
+ while( dvs == LFDS611_VALIDITY_VALID and lfds611_freelist_pop(fs, &fe) )\r
+ {\r
+ lfds611_freelist_get_user_data_from_element( fe, (void **) &cnt );\r
+\r
+ if( cnt->counter != (counter_and_number_trackers+cnt->thread_number)->counter++ )\r
+ dvs = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ lfds611_freelist_push( cleanup_fs, fe );\r
+ }\r
+\r
+ // TRD : clean up\r
+ free( counter_and_number_trackers );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ lfds611_freelist_delete( (ftps+loop)->source_fs, NULL, NULL );\r
+\r
+ free( ftps );\r
+\r
+ lfds611_freelist_delete( cleanup_fs, freelist_test_internal_pushing_delete, NULL );\r
+ lfds611_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+int freelist_test_internal_pushing_init( void **user_data, void *user_state )\r
+{\r
+ struct freelist_test_counter_and_thread_number\r
+ *ftcatn;\r
+\r
+ struct freelist_test_pushing_state\r
+ *ftps;\r
+\r
+ assert( user_data != NULL );\r
+ // TRD : user_state is being used as an integer type\r
+\r
+ *user_data = malloc( sizeof(struct freelist_test_counter_and_thread_number) );\r
+ ftps = (struct freelist_test_pushing_state *) user_state;\r
+\r
+ ftcatn = (struct freelist_test_counter_and_thread_number *) *user_data;\r
+\r
+ ftcatn->counter = (*ftps->count)++;\r
+ ftcatn->thread_number = ftps->thread_number;\r
+\r
+ return( 1 );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+#pragma warning( disable : 4100 )\r
+\r
+void freelist_test_internal_pushing_delete( void *user_data, void *user_state )\r
+{\r
+ assert( user_data != NULL );\r
+ assert( user_state == NULL );\r
+\r
+ free( user_data );\r
+\r
+ return;\r
+}\r
+\r
+#pragma warning( default : 4100 )\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_pushing( void *freelist_test_pushing_state )\r
+{\r
+ struct freelist_test_pushing_state\r
+ *ftps;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ assert( freelist_test_pushing_state != NULL );\r
+\r
+ ftps = (struct freelist_test_pushing_state *) freelist_test_pushing_state;\r
+\r
+ lfds611_freelist_use( ftps->source_fs );\r
+ lfds611_freelist_use( ftps->fs );\r
+\r
+ while( lfds611_freelist_pop(ftps->source_fs, &fe) )\r
+ lfds611_freelist_push( ftps->fs, fe );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_popping_and_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+\r
+ struct freelist_test_popping_and_pushing_state\r
+ *pps;\r
+\r
+ struct lfds611_validation_info\r
+ vi;\r
+\r
+ /* TRD : we have two threads per CPU\r
+ the threads loop for ten seconds\r
+ the first thread pushes 100000 elements then pops 100000 elements\r
+ the second thread pops 100000 elements then pushes 100000 elements\r
+ all pushes and pops go onto the single main freelist\r
+\r
+ after time is up, all threads push what they have remaining onto\r
+ the main freelist\r
+\r
+ we then validate the main freelist\r
+ */\r
+\r
+ internal_display_test_name( "Popping and pushing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_freelist_new( &fs, 100000 * cpu_count, NULL, NULL );\r
+\r
+ pps = malloc( sizeof(struct freelist_test_popping_and_pushing_state) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (pps+loop)->fs = fs;\r
+ lfds611_freelist_new( &(pps+loop)->local_fs, 0, NULL, NULL );\r
+\r
+ (pps+loop+cpu_count)->fs = fs;\r
+ lfds611_freelist_new( &(pps+loop+cpu_count)->local_fs, 100000, NULL, NULL );\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_popping_and_pushing_start_popping, pps+loop );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, freelist_test_internal_thread_popping_and_pushing_start_pushing, pps+loop+cpu_count );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ lfds611_freelist_delete( (pps+loop)->local_fs, NULL, NULL );\r
+\r
+ free( pps );\r
+\r
+ vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;\r
+\r
+ lfds611_freelist_query( fs, LFDS611_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );\r
+\r
+ lfds611_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_popping( void *freelist_test_popping_and_pushing_state )\r
+{\r
+ struct freelist_test_popping_and_pushing_state\r
+ *pps;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned int\r
+ count;\r
+\r
+ assert( freelist_test_popping_and_pushing_state != NULL );\r
+\r
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;\r
+\r
+ lfds611_freelist_use( pps->fs );\r
+ lfds611_freelist_use( pps->local_fs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ count = 0;\r
+\r
+ while( count < 100000 )\r
+ {\r
+ lfds611_freelist_pop( pps->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ {\r
+ lfds611_freelist_push( pps->local_fs, fe );\r
+ count++;\r
+ }\r
+ }\r
+\r
+ while( lfds611_freelist_pop(pps->local_fs, &fe) )\r
+ lfds611_freelist_push( pps->fs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_popping_and_pushing_start_pushing( void *freelist_test_popping_and_pushing_state )\r
+{\r
+ struct freelist_test_popping_and_pushing_state\r
+ *pps;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned int\r
+ count;\r
+\r
+ assert( freelist_test_popping_and_pushing_state != NULL );\r
+\r
+ pps = (struct freelist_test_popping_and_pushing_state *) freelist_test_popping_and_pushing_state;\r
+\r
+ lfds611_freelist_use( pps->fs );\r
+ lfds611_freelist_use( pps->local_fs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ while( lfds611_freelist_pop(pps->local_fs, &fe) )\r
+ lfds611_freelist_push( pps->fs, fe );\r
+\r
+ count = 0;\r
+\r
+ while( count < 1000 )\r
+ {\r
+ lfds611_freelist_pop( pps->fs, &fe );\r
+\r
+ if( fe != NULL )\r
+ {\r
+ lfds611_freelist_push( pps->local_fs, fe );\r
+ count++;\r
+ }\r
+ }\r
+ }\r
+\r
+ // TRD : now push whatever we have in our local freelist\r
+ while( lfds611_freelist_pop(pps->local_fs, &fe) )\r
+ lfds611_freelist_push( pps->fs, fe );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void freelist_test_internal_rapid_popping_and_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+\r
+ struct lfds611_validation_info\r
+ vi;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs;\r
+\r
+ /* TRD : in these tests there is a fundamental antagonism between\r
+ how much checking/memory clean-up we do and the\r
+ likelihood of collisions between threads in their lock-free\r
+ operations\r
+\r
+ the lock-free operations are very quick; if we do anything\r
+ much at all between operations, we greatly reduce the chance\r
+ of threads colliding\r
+\r
+ so some tests do enough checking/clean-up to confirm the\r
+ freelist is valid and to avoid leaking memory, whereas this\r
+ test does minimal checking - in fact, by its nature it can't\r
+ do any real checking - but runs very quickly\r
+\r
+ what we do is create a small freelist and then run one thread\r
+ per CPU, where each thread simply pops and then immediately\r
+ pushes\r
+\r
+ the test runs for ten seconds\r
+\r
+ after the test is done, the only check we do is to traverse\r
+ the freelist, checking for loops and ensuring the number of\r
+ elements is correct\r
+ */\r
+\r
+ internal_display_test_name( "Rapid popping and pushing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_freelist_new( &fs, cpu_count, NULL, NULL );\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, freelist_test_internal_thread_rapid_popping_and_pushing, fs );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ vi.min_elements = cpu_count;\r
+ vi.max_elements = cpu_count;\r
+\r
+ lfds611_freelist_query( fs, LFDS611_FREELIST_QUERY_VALIDATE, (void *) &vi, (void *) &dvs );\r
+\r
+ lfds611_freelist_delete( fs, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "freelist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION freelist_test_internal_thread_rapid_popping_and_pushing( void *lfds611_freelist_state )\r
+{\r
+ struct lfds611_freelist_state\r
+ *fs;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( lfds611_freelist_state != NULL );\r
+\r
+ fs = (struct lfds611_freelist_state *) lfds611_freelist_state;\r
+\r
+ lfds611_freelist_use( fs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_freelist_pop( fs, &fe );\r
+ lfds611_freelist_push( fs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds611_queue( void )\r
+{\r
+ printf( "\n"\r
+ "Queue Tests\n"\r
+ "===========\n" );\r
+\r
+ queue_test_enqueuing();\r
+ queue_test_dequeuing();\r
+ queue_test_enqueuing_and_dequeuing();\r
+ queue_test_rapid_enqueuing_and_dequeuing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_enqueuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_enqueuing_state\r
+ *qtes;\r
+\r
+ lfds611_atom_t\r
+ user_data,\r
+ thread,\r
+ count,\r
+ *per_thread_counters;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 1000000, 1000000 };\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
+ /* TRD : create an empty queue with 1,000,000 elements in its freelist\r
+ then run one thread per CPU\r
+ where each thread busy-works, enqueuing elements (until there are no more elements)\r
+ each element's void pointer of user data is (thread number | element number)\r
+ where element_number is a thread-local counter starting at 0\r
+ where the thread_number occupies the top byte\r
+\r
+ when we're done, we check that all the elements are present\r
+ and increment on a per-thread basis\r
+ */\r
+\r
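+ /* a worked example of the packing described above (illustrative; the\r
+    figures assume a 64-bit build where lfds611_atom_t is 8 bytes wide,\r
+    so the shift of (sizeof(lfds611_atom_t)*8-8) is 56):\r
+\r
+      thread 2, element 5  ->  (2 << 56) | 5  ==  0x0200000000000005\r
+\r
+      decoding : thread = user_data >> 56, count = (user_data << 8) >> 8\r
+ */\r
+\r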
+ internal_display_test_name( "Enqueuing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_queue_new( &qs, 1000000 );\r
+\r
+ qtes = malloc( sizeof(struct queue_test_enqueuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qtes+loop)->qs = qs;\r
+ (qtes+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_enqueuer, qtes+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ free( qtes );\r
+\r
+ /* TRD : first, validate the queue\r
+\r
+ then dequeue\r
+ we expect to find element numbers increment on a per thread basis\r
+ */\r
+\r
+ lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, &vi, dvs );\r
+\r
+ per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 0;\r
+\r
+ while( dvs[0] == LFDS611_VALIDITY_VALID and dvs[1] == LFDS611_VALIDITY_VALID and lfds611_queue_dequeue(qs, (void *) &user_data) )\r
+ {\r
+ thread = user_data >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+ break;\r
+ }\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count > per_thread_counters[thread] )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( count == per_thread_counters[thread] )\r
+ per_thread_counters[thread]++;\r
+ }\r
+\r
+ free( per_thread_counters );\r
+\r
+ lfds611_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_enqueuer( void *queue_test_enqueuing_state )\r
+{\r
+ struct queue_test_enqueuing_state\r
+ *qtes;\r
+\r
+ assert( queue_test_enqueuing_state != NULL );\r
+\r
+ qtes = (struct queue_test_enqueuing_state *) queue_test_enqueuing_state;\r
+\r
+ lfds611_queue_use( qtes->qs );\r
+\r
+ // TRD : top byte of counter is already our thread number\r
+ while( lfds611_queue_enqueue(qtes->qs, (void *) qtes->counter++) );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_dequeuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_dequeuing_state\r
+ *qtds;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
+ /* TRD : create a queue with 1,000,000 elements\r
+\r
+ use a single thread to enqueue every element\r
+ each element's user data is an incrementing counter\r
+\r
+ then run one thread per CPU\r
+ where each thread busy-works, dequeuing\r
+\r
+ when an element is dequeued, we check (on a per-thread basis) that the\r
+ value dequeued is greater than the value previously dequeued\r
+ */\r
+\r
+ internal_display_test_name( "Dequeuing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_queue_new( &qs, 1000000 );\r
+\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ lfds611_queue_enqueue( qs, (void *) (lfds611_atom_t) loop );\r
+\r
+ qtds = malloc( sizeof(struct queue_test_dequeuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qtds+loop)->qs = qs;\r
+ (qtds+loop)->error_flag = LOWERED;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_simple_dequeuer, qtds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : check queue is empty\r
+ lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ // TRD : check for raised error flags\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (qtds+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ free( qtds );\r
+\r
+ lfds611_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_simple_dequeuer( void *queue_test_dequeuing_state )\r
+{\r
+ struct queue_test_dequeuing_state\r
+ *qtds;\r
+\r
+ lfds611_atom_t\r
+ *prev_user_data,\r
+ *user_data;\r
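+ // note : declared as pointers, but the user data stored in the queue is\r
+ //        an integer counter placed in the void pointer, so the comparison\r
+ //        below is numerically comparing those counters\r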
+\r
+ assert( queue_test_dequeuing_state != NULL );\r
+\r
+ qtds = (struct queue_test_dequeuing_state *) queue_test_dequeuing_state;\r
+\r
+ lfds611_queue_use( qtds->qs );\r
+\r
+ lfds611_queue_dequeue( qtds->qs, (void *) &prev_user_data );\r
+\r
+ while( lfds611_queue_dequeue(qtds->qs, (void *) &user_data) )\r
+ {\r
+ if( user_data <= prev_user_data )\r
+ qtds->error_flag = RAISED;\r
+\r
+ prev_user_data = user_data;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_enqueuing_and_dequeuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ subloop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_enqueuing_and_dequeuing_state\r
+ *qteds;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
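+ /* this test creates a queue with one element per CPU, then runs one\r
+ thread per CPU for ten seconds, where each thread enqueues and then\r
+ immediately dequeues; the user data is (thread number | counter), with\r
+ the thread number in the top byte, and each thread checks that the\r
+ counters it dequeues increase on a per-thread basis\r
+ */\r
+\r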
+ internal_display_test_name( "Enqueuing and dequeuing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_queue_new( &qs, cpu_count );\r
+\r
+ qteds = malloc( sizeof(struct queue_test_enqueuing_and_dequeuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qteds+loop)->qs = qs;\r
+ (qteds+loop)->thread_number = loop;\r
+ (qteds+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);\r
+ (qteds+loop)->cpu_count = cpu_count;\r
+ (qteds+loop)->error_flag = LOWERED;\r
+ (qteds+loop)->per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )\r
+ *((qteds+loop)->per_thread_counters+subloop) = 0;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_enqueuer_and_dequeuer, qteds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (qteds+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ free( (qteds+loop)->per_thread_counters );\r
+\r
+ free( qteds );\r
+\r
+ lfds611_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_enqueuer_and_dequeuer( void *queue_test_enqueuing_and_dequeuing_state )\r
+{\r
+ struct queue_test_enqueuing_and_dequeuing_state\r
+ *qteds;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ lfds611_atom_t\r
+ thread,\r
+ count,\r
+ user_data;\r
+\r
+ assert( queue_test_enqueuing_and_dequeuing_state != NULL );\r
+\r
+ qteds = (struct queue_test_enqueuing_and_dequeuing_state *) queue_test_enqueuing_and_dequeuing_state;\r
+\r
+ lfds611_queue_use( qteds->qs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_queue_enqueue( qteds->qs, (void *) (qteds->counter++) );\r
+ lfds611_queue_dequeue( qteds->qs, (void *) &user_data );\r
+\r
+ thread = user_data >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= qteds->cpu_count )\r
+ qteds->error_flag = RAISED;\r
+ else\r
+ {\r
+ if( count < qteds->per_thread_counters[thread] )\r
+ qteds->error_flag = RAISED;\r
+\r
+ if( count >= qteds->per_thread_counters[thread] )\r
+ qteds->per_thread_counters[thread] = count+1;\r
+ }\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void queue_test_rapid_enqueuing_and_dequeuing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_queue_state\r
+ *qs;\r
+\r
+ struct queue_test_rapid_enqueuing_and_dequeuing_state\r
+ *qtreds;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 50000, 50000 };\r
+\r
+ lfds611_atom_t\r
+ user_data,\r
+ thread,\r
+ count,\r
+ *per_thread_counters;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
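+ /* this test creates a queue whose freelist holds 100,000 elements and\r
+ pre-enqueues 50,000 of them (with NULL user data), then runs one thread\r
+ per CPU for ten seconds, where each thread enqueues and dequeues as\r
+ rapidly as possible; afterwards the queue is validated for exactly\r
+ 50,000 elements and the dequeued user data counters are checked on a\r
+ per-thread basis\r
+ */\r
+\r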
+ internal_display_test_name( "Rapid enqueuing and dequeuing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_queue_new( &qs, 100000 );\r
+\r
+ for( loop = 0 ; loop < 50000 ; loop++ )\r
+ lfds611_queue_enqueue( qs, NULL );\r
+\r
+ qtreds = malloc( sizeof(struct queue_test_rapid_enqueuing_and_dequeuing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (qtreds+loop)->qs = qs;\r
+ (qtreds+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, queue_test_internal_thread_rapid_enqueuer_and_dequeuer, qtreds+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds611_queue_query( qs, LFDS611_QUEUE_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ // TRD : now check results\r
+ per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 0;\r
+\r
+ while( dvs[0] == LFDS611_VALIDITY_VALID and dvs[1] == LFDS611_VALIDITY_VALID and lfds611_queue_dequeue(qs, (void *) &user_data) )\r
+ {\r
+ thread = user_data >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+ break;\r
+ }\r
+\r
+ if( per_thread_counters[thread] == 0 )\r
+ per_thread_counters[thread] = count;\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count >= per_thread_counters[thread] )\r
+ per_thread_counters[thread] = count+1;\r
+ }\r
+\r
+ free( per_thread_counters );\r
+\r
+ free( qtreds );\r
+\r
+ lfds611_queue_delete( qs, NULL, NULL );\r
+\r
+ internal_display_test_result( 2, "queue", dvs[0], "queue freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION queue_test_internal_thread_rapid_enqueuer_and_dequeuer( void *queue_test_rapid_enqueuing_and_dequeuing_state )\r
+{\r
+ struct queue_test_rapid_enqueuing_and_dequeuing_state\r
+ *qtreds;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ lfds611_atom_t\r
+ user_data;\r
+\r
+ assert( queue_test_rapid_enqueuing_and_dequeuing_state != NULL );\r
+\r
+ qtreds = (struct queue_test_rapid_enqueuing_and_dequeuing_state *) queue_test_rapid_enqueuing_and_dequeuing_state;\r
+\r
+ lfds611_queue_use( qtreds->qs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_queue_enqueue( qtreds->qs, (void *) (qtreds->counter++) );\r
+ lfds611_queue_dequeue( qtreds->qs, (void *) &user_data );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds611_ringbuffer( void )\r
+{\r
+ printf( "\n"\r
+ "Ringbuffer Tests\n"\r
+ "================\n" );\r
+\r
+ ringbuffer_test_reading();\r
+ ringbuffer_test_writing();\r
+ ringbuffer_test_reading_and_writing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void ringbuffer_test_reading( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ struct ringbuffer_test_reading_state\r
+ *rtrs;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[3];\r
+\r
+ lfds611_atom_t\r
+ total_read = 0;\r
+\r
+ /* TRD : we create a single ringbuffer\r
+ with 1,000,000 elements\r
+ we populate the ringbuffer, where the\r
+ user data is an incrementing counter\r
+\r
+ we create one thread per CPU\r
+ where each thread busy-works,\r
+ reading until the ringbuffer is empty\r
+\r
+ each thread keeps track of the number of reads it performs\r
+ and checks that each user data value it reads is greater than the\r
+ previous user data value that was read\r
+ */\r
+\r
+ internal_display_test_name( "Reading" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_ringbuffer_new( &rs, 1000000, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ {\r
+ lfds611_ringbuffer_get_write_element( rs, &fe, NULL );\r
+ lfds611_freelist_set_user_data_in_element( fe, (void *) (lfds611_atom_t) loop );\r
+ lfds611_ringbuffer_put_write_element( rs, fe );\r
+ }\r
+\r
+ rtrs = malloc( sizeof(struct ringbuffer_test_reading_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rtrs+loop)->rs = rs;\r
+ (rtrs+loop)->read_count = 0;\r
+ (rtrs+loop)->error_flag = LOWERED;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_reader, rtrs+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds611_ringbuffer_query( rs, LFDS611_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ // TRD : check for raised error flags\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (rtrs+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ // TRD : check thread reads total to 1,000,000\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ total_read += (rtrs+loop)->read_count;\r
+\r
+ if( total_read < 1000000 )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( total_read > 1000000 )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ free( rtrs );\r
+\r
+ lfds611_ringbuffer_delete( rs, NULL, NULL );\r
+\r
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_reader( void *ringbuffer_test_reading_state )\r
+{\r
+ struct ringbuffer_test_reading_state\r
+ *rtrs;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ lfds611_atom_t\r
+ *prev_user_data,\r
+ *user_data;\r
+\r
+ assert( ringbuffer_test_reading_state != NULL );\r
+\r
+ rtrs = (struct ringbuffer_test_reading_state *) ringbuffer_test_reading_state;\r
+\r
+ lfds611_ringbuffer_use( rtrs->rs );\r
+\r
+ /* TRD : read an initial element to load a value into prev_user_data\r
+ it may be (under valgrind for example) that by the time we start\r
+ there are no elements remaining to read\r
+ */\r
+\r
+ lfds611_ringbuffer_get_read_element( rtrs->rs, &fe );\r
+ if( fe == NULL )\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+ lfds611_freelist_get_user_data_from_element( fe, (void **) &prev_user_data );\r
+ lfds611_ringbuffer_put_read_element( rtrs->rs, fe );\r
+\r
+ rtrs->read_count++;\r
+\r
+ while( lfds611_ringbuffer_get_read_element(rtrs->rs, &fe) )\r
+ {\r
+ lfds611_freelist_get_user_data_from_element( fe, (void **) &user_data );\r
+ lfds611_ringbuffer_put_read_element( rtrs->rs, fe );\r
+\r
+ if( user_data <= prev_user_data )\r
+ rtrs->error_flag = RAISED;\r
+\r
+ prev_user_data = user_data;\r
+\r
+ rtrs->read_count++;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void ringbuffer_test_writing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ struct ringbuffer_test_writing_state\r
+ *rtws;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 100000, 100000 };\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[3];\r
+\r
+ lfds611_atom_t\r
+ thread,\r
+ count,\r
+ user_data,\r
+ *per_thread_counters;\r
+\r
+ /* TRD : we create a single ringbuffer\r
+ with 100000 elements\r
+ the ringbuffer starts empty\r
+\r
+ we create one thread per CPU\r
+ where each thread busy-works writing\r
+ for ten seconds\r
+\r
+ the user data in each written element is a combination\r
+ of the thread number and the counter\r
+\r
+ after the threads are complete, we validate by\r
+ checking the user data counters increment on a per thread\r
+ basis\r
+ */\r
+\r
+ internal_display_test_name( "Writing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_ringbuffer_new( &rs, 100000, NULL, NULL );\r
+\r
+ rtws = malloc( sizeof(struct ringbuffer_test_writing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rtws+loop)->rs = rs;\r
+ (rtws+loop)->write_count = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_simple_writer, rtws+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now check results\r
+ per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 0;\r
+\r
+ lfds611_ringbuffer_query( rs, LFDS611_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ while( dvs[0] == LFDS611_VALIDITY_VALID and dvs[1] == LFDS611_VALIDITY_VALID and dvs[2] == LFDS611_VALIDITY_VALID and lfds611_ringbuffer_get_read_element(rs, &fe) )\r
+ {\r
+ lfds611_freelist_get_user_data_from_element( fe, (void *) &user_data );\r
+\r
+ thread = user_data >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+ lfds611_ringbuffer_put_read_element( rs, fe );\r
+ break;\r
+ }\r
+\r
+ if( per_thread_counters[thread] == 0 )\r
+ per_thread_counters[thread] = count;\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count >= per_thread_counters[thread] )\r
+ per_thread_counters[thread] = count+1;\r
+\r
+ lfds611_ringbuffer_put_read_element( rs, fe );\r
+ }\r
+\r
+ free( per_thread_counters );\r
+\r
+ free( rtws );\r
+\r
+ lfds611_ringbuffer_delete( rs, NULL, NULL );\r
+\r
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_simple_writer( void *ringbuffer_test_writing_state )\r
+{\r
+ struct ringbuffer_test_writing_state\r
+ *rtws;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( ringbuffer_test_writing_state != NULL );\r
+\r
+ rtws = (struct ringbuffer_test_writing_state *) ringbuffer_test_writing_state;\r
+\r
+ lfds611_ringbuffer_use( rtws->rs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_ringbuffer_get_write_element( rtws->rs, &fe, NULL );\r
+ lfds611_freelist_set_user_data_in_element( fe, (void *) (lfds611_atom_t) (rtws->write_count++) );\r
+ lfds611_ringbuffer_put_write_element( rtws->rs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void ringbuffer_test_reading_and_writing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ subloop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_ringbuffer_state\r
+ *rs;\r
+\r
+ struct ringbuffer_test_reading_and_writing_state\r
+ *rtrws;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 0, 0 };\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[3];\r
+\r
+ /* TRD : we create a single ringbuffer\r
+ with 100000 elements\r
+ the ringbuffer starts empty\r
+\r
+ we create one thread per CPU\r
+ where each thread busy-works writing\r
+ and then immediately reading\r
+ for ten seconds\r
+\r
+ the user data in each written element is a combination\r
+ of the thread number and the counter\r
+\r
+ while a thread runs, it keeps track of the\r
+ counters for the other threads and throws an error\r
+ if it sees the number stay the same or decrease\r
+ */\r
+\r
+ internal_display_test_name( "Reading and writing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_ringbuffer_new( &rs, 100000, NULL, NULL );\r
+\r
+ rtrws = malloc( sizeof(struct ringbuffer_test_reading_and_writing_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (rtrws+loop)->rs = rs;\r
+ (rtrws+loop)->counter = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);\r
+ (rtrws+loop)->cpu_count = cpu_count;\r
+ (rtrws+loop)->error_flag = LOWERED;\r
+ (rtrws+loop)->per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( subloop = 0 ; subloop < cpu_count ; subloop++ )\r
+ *((rtrws+loop)->per_thread_counters+subloop) = 0;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, ringbuffer_test_thread_reader_writer, rtrws+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ lfds611_ringbuffer_query( rs, LFDS611_RINGBUFFER_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ if( (rtrws+loop)->error_flag == RAISED )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ free( (rtrws+loop)->per_thread_counters );\r
+\r
+ free( rtrws );\r
+\r
+ lfds611_ringbuffer_delete( rs, NULL, NULL );\r
+\r
+ internal_display_test_result( 3, "queue", dvs[0], "queue freelist", dvs[1], "freelist", dvs[2] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION ringbuffer_test_thread_reader_writer( void *ringbuffer_test_reading_and_writing_state )\r
+{\r
+ struct ringbuffer_test_reading_and_writing_state\r
+ *rtrws;\r
+\r
+ struct lfds611_freelist_element\r
+ *fe;\r
+\r
+ lfds611_atom_t\r
+ user_data,\r
+ thread,\r
+ count;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( ringbuffer_test_reading_and_writing_state != NULL );\r
+\r
+ rtrws = (struct ringbuffer_test_reading_and_writing_state *) ringbuffer_test_reading_and_writing_state;\r
+\r
+ lfds611_ringbuffer_use( rtrws->rs );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
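+ // TRD : write one element, then immediately read one back and check that the counter for its writing thread only ever increases\r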
+ lfds611_ringbuffer_get_write_element( rtrws->rs, &fe, NULL );\r
+ lfds611_freelist_set_user_data_in_element( fe, (void *) (lfds611_atom_t) (rtrws->counter++) );\r
+ lfds611_ringbuffer_put_write_element( rtrws->rs, fe );\r
+\r
+ lfds611_ringbuffer_get_read_element( rtrws->rs, &fe );\r
+ lfds611_freelist_get_user_data_from_element( fe, (void *) &user_data );\r
+\r
+ thread = user_data >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= rtrws->cpu_count )\r
+ rtrws->error_flag = RAISED;\r
+ else\r
+ {\r
+ if( count < rtrws->per_thread_counters[thread] )\r
+ rtrws->error_flag = RAISED;\r
+\r
+ if( count >= rtrws->per_thread_counters[thread] )\r
+ rtrws->per_thread_counters[thread] = count+1;\r
+ }\r
+\r
+ lfds611_ringbuffer_put_read_element( rtrws->rs, fe );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds611_slist( void )\r
+{\r
+ printf( "\n"\r
+ "SList Tests\n"\r
+ "===========\n" );\r
+\r
+ test_slist_new_delete_get();\r
+ test_slist_get_set_user_data();\r
+ test_slist_delete_all_elements();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_slist_new_delete_get( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ struct lfds611_slist_state\r
+ *ss;\r
+\r
+ struct lfds611_slist_element\r
+ *se = NULL;\r
+\r
+ struct slist_test_state\r
+ *sts;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ size_t\r
+ total_create_count = 0,\r
+ total_delete_count = 0,\r
+ element_count = 0;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs = LFDS611_VALIDITY_VALID;\r
+\r
+ /* TRD : two threads per CPU\r
+ first simply alternates between new_head() and new_next() (next on element created by head)\r
+ second calls get_next, if NULL, then calls get_head, and deletes the element\r
+ both threads keep count of created and deleted\r
+ validate is to reconcile created, deleted and remaining in list\r
+ */\r
+\r
+ internal_display_test_name( "New head/next, delete and get next" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_slist_new( &ss, NULL, NULL );\r
+\r
+ sts = malloc( sizeof(struct slist_test_state) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ {\r
+ (sts+loop)->ss = ss;\r
+ (sts+loop)->create_count = 0;\r
+ (sts+loop)->delete_count = 0;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, slist_test_internal_thread_new_delete_get_new_head_and_next, sts+loop );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, slist_test_internal_thread_new_delete_get_delete_and_get, sts+loop+cpu_count );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now validate\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ {\r
+ total_create_count += (sts+loop)->create_count;\r
+ total_delete_count += (sts+loop)->delete_count;\r
+ }\r
+\r
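+ // TRD : count the elements remaining in the list\r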
+ while( NULL != lfds611_slist_get_head_and_then_next(ss, &se) )\r
+ element_count++;\r
+\r
+ if( total_create_count - total_delete_count - element_count != 0 )\r
+ dvs = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ free( sts );\r
+\r
+ lfds611_slist_delete( ss );\r
+\r
+ internal_display_test_result( 1, "slist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_new_head_and_next( void *slist_test_state )\r
+{\r
+ struct slist_test_state\r
+ *sts;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ struct lfds611_slist_element\r
+ *se = NULL;\r
+\r
+ assert( slist_test_state != NULL );\r
+\r
+ sts = (struct slist_test_state *) slist_test_state;\r
+\r
+ lfds611_slist_use( sts->ss );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 1 )\r
+ {\r
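+ // TRD : alternate - even create counts make a new head element, odd create counts make a new next element on the head element just created\r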
+ if( sts->create_count % 2 == 0 )\r
+ se = lfds611_slist_new_head( sts->ss, NULL );\r
+ else\r
+ lfds611_slist_new_next( se, NULL );\r
+\r
+ sts->create_count++;\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION slist_test_internal_thread_new_delete_get_delete_and_get( void *slist_test_state )\r
+{\r
+ struct slist_test_state\r
+ *sts;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ struct lfds611_slist_element\r
+ *se = NULL;\r
+\r
+ assert( slist_test_state != NULL );\r
+\r
+ sts = (struct slist_test_state *) slist_test_state;\r
+\r
+ lfds611_slist_use( sts->ss );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 1 )\r
+ {\r
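+ // TRD : walk the list, restarting from the head when we fall off the end, logically deleting each element we reach\r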
+ if( se == NULL )\r
+ lfds611_slist_get_head( sts->ss, &se );\r
+ else\r
+ lfds611_slist_get_next( se, &se );\r
+\r
+ if( se != NULL )\r
+ {\r
+ if( 1 == lfds611_slist_logically_delete_element(sts->ss, se) )\r
+ sts->delete_count++;\r
+ }\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_slist_get_set_user_data( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ struct lfds611_slist_state\r
+ *ss;\r
+\r
+ struct lfds611_slist_element\r
+ *se = NULL;\r
+\r
+ struct slist_test_state\r
+ *sts;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ lfds611_atom_t\r
+ thread_and_count,\r
+ thread,\r
+ count,\r
+ *per_thread_counters,\r
+ *per_thread_drop_flags;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs = LFDS611_VALIDITY_VALID;\r
+\r
+ /* TRD : create a list of (cpu_count*10) elements, user data 0\r
+ one thread per CPU\r
+ each thread loops, setting user_data to ((thread_number << (sizeof(lfds611_atom_t)*8-8)) | count)\r
+ validation is to scan list, count on a per thread basis should go down only once\r
+ */\r
+\r
+ internal_display_test_name( "Get and set user data" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_slist_new( &ss, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < cpu_count * 10 ; loop++ )\r
+ lfds611_slist_new_head( ss, NULL );\r
+\r
+ sts = malloc( sizeof(struct slist_test_state) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (sts+loop)->ss = ss;\r
+ (sts+loop)->thread_and_count = (lfds611_atom_t) loop << (sizeof(lfds611_atom_t)*8-8);\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, slist_test_internal_thread_get_set_user_data, sts+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now validate\r
+ per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+ per_thread_drop_flags = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ *(per_thread_counters+loop) = 0;\r
+ *(per_thread_drop_flags+loop) = 0;\r
+ }\r
+\r
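+ // TRD : walk the list; for each thread the observed counter may drop at most once - a second drop indicates an error\r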
+ while( dvs == LFDS611_VALIDITY_VALID and NULL != lfds611_slist_get_head_and_then_next(ss, &se) )\r
+ {\r
+ lfds611_slist_get_user_data_from_element( se, (void **) &thread_and_count );\r
+\r
+ thread = thread_and_count >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (thread_and_count << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+ break;\r
+ }\r
+\r
+ if( per_thread_counters[thread] == 0 )\r
+ {\r
+ per_thread_counters[thread] = count;\r
+ continue;\r
+ }\r
+\r
+ per_thread_counters[thread]++;\r
+\r
+ if( count < per_thread_counters[thread] and per_thread_drop_flags[thread] == 1 )\r
+ {\r
+ dvs = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ break;\r
+ }\r
+\r
+ if( count < per_thread_counters[thread] and per_thread_drop_flags[thread] == 0 )\r
+ {\r
+ per_thread_drop_flags[thread] = 1;\r
+ per_thread_counters[thread] = count;\r
+ continue;\r
+ }\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ dvs = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count >= per_thread_counters[thread] )\r
+ per_thread_counters[thread] = count;\r
+ }\r
+\r
+ free( per_thread_drop_flags );\r
+ free( per_thread_counters );\r
+\r
+ free( sts );\r
+\r
+ lfds611_slist_delete( ss );\r
+\r
+ internal_display_test_result( 1, "slist", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION slist_test_internal_thread_get_set_user_data( void *slist_test_state )\r
+{\r
+ struct slist_test_state\r
+ *sts;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ struct lfds611_slist_element\r
+ *se = NULL;\r
+\r
+ assert( slist_test_state != NULL );\r
+\r
+ sts = (struct slist_test_state *) slist_test_state;\r
+\r
+ lfds611_slist_use( sts->ss );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 1 )\r
+ {\r
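+ // TRD : walk the list, stamping each element with our (thread number | counter), restarting from the head at the end of the list\r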
+ if( se == NULL )\r
+ lfds611_slist_get_head( sts->ss, &se );\r
+\r
+ lfds611_slist_set_user_data_in_element( se, (void *) sts->thread_and_count++ );\r
+\r
+ lfds611_slist_get_next( se, &se );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_slist_delete_all_elements( void )\r
+{\r
+ struct lfds611_slist_state\r
+ *ss;\r
+\r
+ struct lfds611_slist_element\r
+ *se = NULL;\r
+\r
+ size_t\r
+ element_count = 0;\r
+\r
+ unsigned int\r
+ loop;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs = LFDS611_VALIDITY_VALID;\r
+\r
+ /* TRD : this test creates a list of 1,000,000 elements\r
+ then simply calls delete_all_elements()\r
+ we then count the number of elements remaining\r
+ should be zero :-)\r
+ */\r
+\r
+ internal_display_test_name( "Delete all elements" );\r
+\r
+ lfds611_slist_new( &ss, NULL, NULL );\r
+\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ lfds611_slist_new_head( ss, NULL );\r
+\r
+ lfds611_slist_single_threaded_physically_delete_all_elements( ss );\r
+\r
+ while( NULL != lfds611_slist_get_head_and_then_next(ss, &se) )\r
+ element_count++;\r
+\r
+ if( element_count != 0 )\r
+ dvs = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+\r
+ lfds611_slist_delete( ss );\r
+\r
+ internal_display_test_result( 1, "slist", dvs );\r
+\r
+ return;\r
+}\r
+\r
--- /dev/null
+#include "internal.h"\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void test_lfds611_stack( void )\r
+{\r
+ printf( "\n"\r
+ "Stack Tests\n"\r
+ "===========\n" );\r
+\r
+ stack_test_internal_popping();\r
+ stack_test_internal_pushing();\r
+ stack_test_internal_popping_and_pushing();\r
+ stack_test_internal_rapid_popping_and_pushing();\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void stack_test_internal_popping( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ *found_count,\r
+ cpu_count;\r
+\r
+ lfds611_atom_t\r
+ count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs = LFDS611_VALIDITY_VALID;\r
+\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ struct stack_test_popping_state\r
+ *stps;\r
+\r
+ /* TRD : we create a stack with 1,000,000 elements\r
+\r
+ we then populate the stack, where each element is\r
+ set to contain a void pointer which is its element number\r
+\r
+ we then run one thread per CPU\r
+ where each thread loops, popping as quickly as possible\r
+ each popped element is pushed onto a thread-local stack\r
+\r
+ the threads run till the source stack is empty\r
+\r
+ we then check the thread-local stacks\r
+ we should find we have every element\r
+\r
+ then tidy up\r
+ */\r
+\r
+ internal_display_test_name( "Popping" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_stack_new( &ss, 1000000 );\r
+\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ lfds611_stack_push( ss, (void *) (lfds611_atom_t) loop );\r
+\r
+ stps = malloc( sizeof(struct stack_test_popping_state) * cpu_count );\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (stps+loop)->ss = ss;\r
+ lfds611_stack_new( &(stps+loop)->ss_thread_local, 1000000 );\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_popping, stps+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : now we check the thread-local stacks\r
+ found_count = malloc( sizeof(unsigned int) * 1000000 );\r
+ for( loop = 0 ; loop < 1000000 ; loop++ )\r
+ *(found_count+loop) = 0;\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ while( lfds611_stack_pop((stps+loop)->ss_thread_local, (void **) &count) )\r
+ (*(found_count+count))++;\r
+\r
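+ // TRD : every element number must have been seen exactly once across the thread-local stacks\r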
+ for( loop = 0 ; loop < 1000000 and dvs == LFDS611_VALIDITY_VALID ; loop++ )\r
+ {\r
+ if( *(found_count+loop) == 0 )\r
+ dvs = LFDS611_VALIDITY_INVALID_MISSING_ELEMENTS;\r
+\r
+ if( *(found_count+loop) > 1 )\r
+ dvs = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+ }\r
+\r
+ // TRD : cleanup\r
+ free( found_count );\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ lfds611_stack_delete( (stps+loop)->ss_thread_local, NULL, NULL );\r
+ free( stps );\r
+ lfds611_stack_delete( ss, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 1, "stack", dvs );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping( void *stack_test_popping_state )\r
+{\r
+ struct stack_test_popping_state\r
+ *stps;\r
+\r
+ lfds611_atom_t\r
+ count;\r
+\r
+ assert( stack_test_popping_state != NULL );\r
+\r
+ stps = (struct stack_test_popping_state *) stack_test_popping_state;\r
+\r
+ lfds611_stack_use( stps->ss );\r
+\r
+ while( lfds611_stack_pop(stps->ss, (void **) &count) )\r
+ lfds611_stack_push( stps->ss_thread_local, (void *) count );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void stack_test_internal_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
+ struct stack_test_pushing_state\r
+ *stps;\r
+\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ lfds611_atom_t\r
+ user_data,\r
+ thread,\r
+ count,\r
+ *per_thread_counters;\r
+\r
+ struct lfds611_validation_info\r
+ vi = { 1000000, 1000000 };\r
+\r
+ /* TRD : we create a stack with 1,000,000 elements\r
+\r
+ we then create one thread per CPU, where each thread\r
+ pushes as quickly as possible to the stack\r
+\r
+ the data pushed is a counter and a thread ID\r
+\r
+ the threads exit when the stack is full\r
+\r
+ we then validate the stack;\r
+\r
+ checking that the counts increment on a per unique ID basis\r
+ and that the number of elements we pop equals 1,000,000\r
+ (since each element has an incrementing counter which is\r
+ unique on a per unique ID basis, we can know we didn't lose\r
+ any elements)\r
+ */\r
+\r
+ internal_display_test_name( "Pushing" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ stps = malloc( sizeof(struct stack_test_pushing_state) * cpu_count );\r
+\r
+ // TRD : the main stack\r
+ lfds611_stack_new( &ss, 1000000 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (stps+loop)->thread_number = (lfds611_atom_t) loop;\r
+ (stps+loop)->ss = ss;\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_pushing, stps+loop );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ // TRD : the stack is now fully pushed; time to verify\r
+ per_thread_counters = malloc( sizeof(lfds611_atom_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ *(per_thread_counters+loop) = 1000000;\r
+\r
+ lfds611_stack_query( ss, LFDS611_STACK_QUERY_VALIDATE, &vi, (void *) dvs );\r
+\r
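+ // TRD : the stack is LIFO, so as we pop, the counter for each thread must strictly decrease\r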
+ while( dvs[0] == LFDS611_VALIDITY_VALID and lfds611_stack_pop(ss, (void **) &user_data) )\r
+ {\r
+ thread = user_data >> (sizeof(lfds611_atom_t)*8-8);\r
+ count = (user_data << 8) >> 8;\r
+\r
+ if( thread >= cpu_count )\r
+ {\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_TEST_DATA;\r
+ break;\r
+ }\r
+\r
+ if( count > per_thread_counters[thread] )\r
+ dvs[0] = LFDS611_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;\r
+\r
+ if( count < per_thread_counters[thread] )\r
+ per_thread_counters[thread] = count-1;\r
+ }\r
+\r
+ // TRD : clean up\r
+ free( per_thread_counters );\r
+\r
+ free( stps );\r
+\r
+ lfds611_stack_delete( ss, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 2, "stack", dvs[0], "stack freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_pushing( void *stack_test_pushing_state )\r
+{\r
+ struct stack_test_pushing_state\r
+ *stps;\r
+\r
+ lfds611_atom_t\r
+ counter = 0;\r
+\r
+ assert( stack_test_pushing_state != NULL );\r
+\r
+ stps = (struct stack_test_pushing_state *) stack_test_pushing_state;\r
+\r
+ lfds611_stack_use( stps->ss );\r
+\r
+ // TRD : we write (thread_number | counter), where thread_number is the top 8 bits of the lfds611_atom_t\r
+ while( lfds611_stack_push(stps->ss, (void **) ((stps->thread_number << (sizeof(lfds611_atom_t)*8-8)) | counter++)) );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void stack_test_internal_popping_and_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ subloop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ struct stack_test_popping_and_pushing_state\r
+ *stpps;\r
+\r
+ struct lfds611_validation_info\r
+ vi;\r
+\r
+ /* TRD : we have two threads per CPU\r
+ the threads loop for ten seconds\r
+ the first thread pushes 100000 elements then pops 100000 elements\r
+ the second thread pops 100000 elements then pushes 100000 elements\r
+ all pushes and pops go onto the single main stack\r
+\r
+ after time is up, all threads push what they have remaining onto\r
+ the main stack\r
+\r
+ we then validate the main stack\r
+ */\r
+\r
+ internal_display_test_name( "Popping and pushing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ // TRD : just some initial elements so the popping threads can start immediately\r
+ lfds611_stack_new( &ss, 100000 * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < 100000 * cpu_count ; loop++ )\r
+ lfds611_stack_push( ss, (void *) (lfds611_atom_t) loop );\r
+\r
+ stpps = malloc( sizeof(struct stack_test_popping_and_pushing_state) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ (stpps+loop)->ss = ss;\r
+ lfds611_stack_new( &(stpps+loop)->local_ss, 100000 );\r
+\r
+ (stpps+loop+cpu_count)->ss = ss;\r
+ lfds611_stack_new( &(stpps+loop+cpu_count)->local_ss, 100000 );\r
+\r
+ // TRD : fill the pushing thread stacks\r
+ for( subloop = 0 ; subloop < 100000 ; subloop++ )\r
+ lfds611_stack_push( (stpps+loop+cpu_count)->local_ss, (void *) (lfds611_atom_t) subloop );\r
+ }\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count * 2 );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ {\r
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_popping_and_pushing_start_popping, stpps+loop );\r
+ abstraction_thread_start( &thread_handles[loop+cpu_count], loop, stack_test_internal_thread_popping_and_pushing_start_pushing, stpps+loop+cpu_count );\r
+ }\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ for( loop = 0 ; loop < cpu_count * 2 ; loop++ )\r
+ lfds611_stack_delete( (stpps+loop)->local_ss, NULL, NULL );\r
+\r
+ free( stpps );\r
+\r
+ vi.min_elements = vi.max_elements = 100000 * cpu_count * 2;\r
+\r
+ lfds611_stack_query( ss, LFDS611_STACK_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ lfds611_stack_delete( ss, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 2, "stack", dvs[0], "stack freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_popping( void *stack_test_popping_and_pushing_state )\r
+{\r
+ struct stack_test_popping_and_pushing_state\r
+ *stpps;\r
+\r
+ void\r
+ *user_data;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned int\r
+ count;\r
+\r
+ assert( stack_test_popping_and_pushing_state != NULL );\r
+\r
+ stpps = (struct stack_test_popping_and_pushing_state *) stack_test_popping_and_pushing_state;\r
+\r
+ lfds611_stack_use( stpps->ss );\r
+ lfds611_stack_use( stpps->local_ss );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ count = 0;\r
+\r
+ while( count < 100000 )\r
+ if( lfds611_stack_pop(stpps->ss, &user_data) )\r
+ {\r
+ lfds611_stack_push( stpps->local_ss, user_data );\r
+ count++;\r
+ }\r
+\r
+ // TRD : return our local stack to the main stack\r
+ while( lfds611_stack_pop(stpps->local_ss, &user_data) )\r
+ lfds611_stack_push( stpps->ss, user_data );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_popping_and_pushing_start_pushing( void *stack_test_popping_and_pushing_state )\r
+{\r
+ struct stack_test_popping_and_pushing_state\r
+ *stpps;\r
+\r
+ void\r
+ *user_data;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ unsigned int\r
+ count;\r
+\r
+ assert( stack_test_popping_and_pushing_state != NULL );\r
+\r
+ stpps = (struct stack_test_popping_and_pushing_state *) stack_test_popping_and_pushing_state;\r
+\r
+ lfds611_stack_use( stpps->ss );\r
+ lfds611_stack_use( stpps->local_ss );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ // TRD : return our local stack to the main stack\r
+ while( lfds611_stack_pop(stpps->local_ss, &user_data) )\r
+ lfds611_stack_push( stpps->ss, user_data );\r
+\r
+ count = 0;\r
+\r
+ while( count < 100000 )\r
+ if( lfds611_stack_pop(stpps->ss, &user_data) )\r
+ {\r
+ lfds611_stack_push( stpps->local_ss, user_data );\r
+ count++;\r
+ }\r
+ }\r
+\r
+ // TRD : now push whatever we have in our local stack\r
+ while( lfds611_stack_pop(stpps->local_ss, &user_data) )\r
+ lfds611_stack_push( stpps->ss, user_data );\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+void stack_test_internal_rapid_popping_and_pushing( void )\r
+{\r
+ unsigned int\r
+ loop,\r
+ cpu_count;\r
+\r
+ thread_state_t\r
+ *thread_handles;\r
+\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ struct lfds611_validation_info\r
+ vi;\r
+\r
+ enum lfds611_data_structure_validity\r
+ dvs[2];\r
+\r
+ /* TRD : in these tests there is a fundamental antagonism between\r
+ how much checking/memory clean-up we do and the\r
+ likelihood of collisions between threads in their lock-free\r
+ operations\r
+\r
+ the lock-free operations are very quick; if we do anything\r
+ much at all between operations, we greatly reduce the chance\r
+ of threads colliding\r
+\r
+ so we have some tests which do enough checking/clean-up that\r
+ they can tell the stack is valid and don't leak memory,\r
+ and this test is one of those which does minimal\r
+ checking - in fact, by its nature, it can't do any\r
+ real checking - but goes very quickly\r
+\r
+ what we do is create a small stack and then run one thread\r
+ per CPU, where each thread simply pushes and then immediately\r
+ pops\r
+\r
+ the test runs for ten seconds\r
+\r
+ after the test is done, the only check we do is to traverse\r
+ the stack, checking for loops and ensuring the number of\r
+ elements is correct\r
+ */\r
+\r
+ internal_display_test_name( "Rapid popping and pushing (10 seconds)" );\r
+\r
+ cpu_count = abstraction_cpu_count();\r
+\r
+ lfds611_stack_new( &ss, cpu_count );\r
+\r
+ thread_handles = malloc( sizeof(thread_state_t) * cpu_count );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_start( &thread_handles[loop], loop, stack_test_internal_thread_rapid_popping_and_pushing, ss );\r
+\r
+ for( loop = 0 ; loop < cpu_count ; loop++ )\r
+ abstraction_thread_wait( thread_handles[loop] );\r
+\r
+ free( thread_handles );\r
+\r
+ vi.min_elements = 0;\r
+ vi.max_elements = 0;\r
+\r
+ lfds611_stack_query( ss, LFDS611_STACK_QUERY_VALIDATE, (void *) &vi, (void *) dvs );\r
+\r
+ lfds611_stack_delete( ss, NULL, NULL );\r
+\r
+ // TRD : print the test result\r
+ internal_display_test_result( 2, "stack", dvs[0], "stack freelist", dvs[1] );\r
+\r
+ return;\r
+}\r
+\r
+\r
+\r
+\r
+\r
+/****************************************************************************/\r
+thread_return_t CALLING_CONVENTION stack_test_internal_thread_rapid_popping_and_pushing( void *stack_state )\r
+{\r
+ struct lfds611_stack_state\r
+ *ss;\r
+\r
+ void\r
+ *user_data = NULL;\r
+\r
+ time_t\r
+ start_time;\r
+\r
+ assert( stack_state != NULL );\r
+\r
+ ss = (struct lfds611_stack_state *) stack_state;\r
+\r
+ lfds611_stack_use( ss );\r
+\r
+ time( &start_time );\r
+\r
+ while( time(NULL) < start_time + 10 )\r
+ {\r
+ lfds611_stack_push( ss, user_data );\r
+ lfds611_stack_pop( ss, &user_data );\r
+ }\r
+\r
+ return( (thread_return_t) EXIT_SUCCESS );\r
+}\r
+\r
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 10.00\r
+# Visual Studio 2008\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcproj", "{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05} = {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds611", "..\liblfds611\liblfds611.vcproj", "{F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug Lib|Win32 = Debug Lib|Win32\r
+ Debug Lib|x64 = Debug Lib|x64\r
+ Debug|Win32 = Debug|Win32\r
+ Debug|x64 = Debug|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release Lib|Win32 = Release Lib|Win32\r
+ Release Lib|x64 = Release Lib|x64\r
+ Release|Win32 = Release|Win32\r
+ Release|x64 = Release|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug DLL|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug Lib|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.ActiveCfg = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|Win32.Build.0 = Debug|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.ActiveCfg = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Debug|x64.Build.0 = Debug|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release DLL|x64.Build.0 = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release Lib|x64.Build.0 = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.ActiveCfg = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|Win32.Build.0 = Release|Win32\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.ActiveCfg = Release|x64\r
+ {6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}.Release|x64.Build.0 = Release|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug Lib|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.ActiveCfg = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|Win32.Build.0 = Debug Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.ActiveCfg = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Debug|x64.Build.0 = Debug Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release Lib|x64.Build.0 = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.ActiveCfg = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|Win32.Build.0 = Release Lib|Win32\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.ActiveCfg = Release Lib|x64\r
+ {F73AE755-F6D8-4C3A-977D-FBB40DC0ED05}.Release|x64.Build.0 = Release Lib|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="Windows-1252"?>\r
+<VisualStudioProject\r
+ ProjectType="Visual C++"\r
+ Version="9.00"\r
+ Name="test"\r
+ ProjectGUID="{6E4CBF20-DF1A-4FA0-8A90-58E2A3A5CF09}"\r
+ RootNamespace="test"\r
+ TargetFrameworkVersion="196613"\r
+ >\r
+ <Platforms>\r
+ <Platform\r
+ Name="Win32"\r
+ />\r
+ <Platform\r
+ Name="x64"\r
+ />\r
+ </Platforms>\r
+ <ToolFiles>\r
+ </ToolFiles>\r
+ <Configurations>\r
+ <Configuration\r
+ Name="Debug|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds611\inc""\r
+ MinimalRebuild="true"\r
+ ExceptionHandling="0"\r
+ BasicRuntimeChecks="3"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="4"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Debug|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /D_DEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="0"\r
+ EnableIntrinsicFunctions="true"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds611\inc""\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="1"\r
+ EnableFunctionLevelLinking="true"\r
+ BrowseInformation="1"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="3"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmtd.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="true"\r
+ SubSystem="1"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|Win32"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ WholeProgramOptimization="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds611\inc""\r
+ ExceptionHandling="0"\r
+ RuntimeLibrary="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ DebugInformationFormat="0"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="1"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ <Configuration\r
+ Name="Release|x64"\r
+ OutputDirectory="$(ProjectDir)\bin\$(PlatformName)\$(ConfigurationName)"\r
+ IntermediateDirectory="$(ProjectDir)\obj\$(PlatformName)\$(ConfigurationName)"\r
+ ConfigurationType="1"\r
+ CharacterSet="1"\r
+ >\r
+ <Tool\r
+ Name="VCPreBuildEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCCustomBuildTool"\r
+ />\r
+ <Tool\r
+ Name="VCXMLDataGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCWebServiceProxyGeneratorTool"\r
+ />\r
+ <Tool\r
+ Name="VCMIDLTool"\r
+ />\r
+ <Tool\r
+ Name="VCCLCompilerTool"\r
+ AdditionalOptions="/DWIN32_LEAN_AND_MEAN /DNDEBUG /D_CRT_SECURE_NO_WARNINGS"\r
+ Optimization="3"\r
+ EnableIntrinsicFunctions="true"\r
+ FavorSizeOrSpeed="1"\r
+ AdditionalIncludeDirectories=""$(ProjectDir)\src";"$(ProjectDir)\..\liblfds611\inc""\r
+ ExceptionHandling="0"\r
+ EnableFunctionLevelLinking="true"\r
+ WarningLevel="4"\r
+ WarnAsError="true"\r
+ CompileAs="1"\r
+ />\r
+ <Tool\r
+ Name="VCManagedResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCResourceCompilerTool"\r
+ />\r
+ <Tool\r
+ Name="VCPreLinkEventTool"\r
+ />\r
+ <Tool\r
+ Name="VCLinkerTool"\r
+ AdditionalDependencies="libcmt.lib"\r
+ IgnoreAllDefaultLibraries="true"\r
+ GenerateDebugInformation="false"\r
+ SubSystem="1"\r
+ OptimizeReferences="2"\r
+ EnableCOMDATFolding="2"\r
+ TargetMachine="17"\r
+ />\r
+ <Tool\r
+ Name="VCALinkTool"\r
+ />\r
+ <Tool\r
+ Name="VCManifestTool"\r
+ />\r
+ <Tool\r
+ Name="VCXDCMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCBscMakeTool"\r
+ />\r
+ <Tool\r
+ Name="VCFxCopTool"\r
+ />\r
+ <Tool\r
+ Name="VCAppVerifierTool"\r
+ />\r
+ <Tool\r
+ Name="VCPostBuildEventTool"\r
+ />\r
+ </Configuration>\r
+ </Configurations>\r
+ <References>\r
+ </References>\r
+ <Files>\r
+ <Filter\r
+ Name="src"\r
+ Filter="cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx"\r
+ UniqueIdentifier="{4FC737F1-C7A5-4376-A066-2A32D752A2FF}"\r
+ >\r
+ <File\r
+ RelativePath=".\src\abstraction.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_cpu_count.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_start.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\abstraction_thread_wait.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\benchmark_stack.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\internal.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\main.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\misc.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\structures.h"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_abstraction.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_freelist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_queue.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_ringbuffer.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_slist.c"\r
+ >\r
+ </File>\r
+ <File\r
+ RelativePath=".\src\test_stack.c"\r
+ >\r
+ </File>\r
+ </Filter>\r
+ </Files>\r
+ <Globals>\r
+ </Globals>\r
+</VisualStudioProject>\r
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+INCDIR := ../../inc
+OBJDIR := ../../obj
+SRCDIR := ../../src
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+VERSION_NUMBER := 1
+MINOR_NUMBER := 0
+RELEASE_NUMBER := 0
+
+##### sources, objects and libraries #####
+BINNAME := liblfds700
+ARFILENAME := $(BINNAME).a
+ARPATHNAME := $(BINDIR)/$(ARFILENAME)
+SOBASENAME := $(BINNAME).so
+SONAME := $(SOBASENAME).$(VERSION_NUMBER)
+SOFILENAME := $(SONAME).$(MINOR_NUMBER).$(RELEASE_NUMBER)
+SOPATHNAME := $(BINDIR)/$(SOFILENAME)
+INCNAME := $(INCDIR)/$(BINNAME).h
+SRCDIRS := lfds700_btree_addonly_unbalanced lfds700_freelist lfds700_hash_addonly lfds700_list_addonly_ordered_singlylinked lfds700_list_addonly_singlylinked_unordered lfds700_misc lfds700_queue lfds700_queue_bounded_singleconsumer_singleproducer lfds700_ringbuffer lfds700_stack
+SOURCES := lfds700_hash_addonly_cleanup.c lfds700_hash_addonly_get.c lfds700_hash_addonly_init.c lfds700_hash_addonly_insert.c lfds700_hash_addonly_iterate.c lfds700_hash_addonly_query.c \
+ lfds700_list_addonly_ordered_singlylinked_cleanup.c lfds700_list_addonly_ordered_singlylinked_get.c lfds700_list_addonly_ordered_singlylinked_init.c lfds700_list_addonly_ordered_singlylinked_insert.c lfds700_list_addonly_ordered_singlylinked_query.c \
+ lfds700_list_addonly_singlylinked_unordered_cleanup.c lfds700_list_addonly_singlylinked_unordered_get.c lfds700_list_addonly_singlylinked_unordered_init.c lfds700_list_addonly_singlylinked_unordered_insert.c lfds700_list_addonly_singlylinked_unordered_query.c \
+ lfds700_btree_addonly_unbalanced_cleanup.c lfds700_btree_addonly_unbalanced_get.c lfds700_btree_addonly_unbalanced_init.c lfds700_btree_addonly_unbalanced_insert.c lfds700_btree_addonly_unbalanced_query.c \
+ lfds700_freelist_cleanup.c lfds700_freelist_init.c lfds700_freelist_pop.c lfds700_freelist_push.c lfds700_freelist_query.c \
+ lfds700_misc_cleanup.c lfds700_misc_globals.c lfds700_misc_init.c lfds700_misc_prng.c lfds700_misc_query.c \
+ lfds700_queue_cleanup.c lfds700_queue_dequeue.c lfds700_queue_enqueue.c lfds700_queue_init.c lfds700_queue_query.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c lfds700_queue_bounded_singleconsumer_singleproducer_init.c lfds700_queue_bounded_singleconsumer_singleproducer_query.c \
+ lfds700_ringbuffer_cleanup.c lfds700_ringbuffer_init.c lfds700_ringbuffer_query.c lfds700_ringbuffer_read.c lfds700_ringbuffer_write.c \
+ lfds700_stack_cleanup.c lfds700_stack_init.c lfds700_stack_pop.c lfds700_stack_push.c lfds700_stack_query.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS :=
+
+##### default paths fix up #####
+CPATH := $(subst : ,:,$(SRCDIR):$(INCDIR))
+
+##### tools #####
+MAKE := make
+MFLAGS :=
+
+DG := gcc
+DGFLAGS := -MM -std=gnu89
+
+CC := gcc
+CFBASE := -c -fno-strict-aliasing -std=gnu89 -Wall -Werror -Wno-unknown-pragmas -Wno-unused-but-set-variable -Wno-unused-variable
+CFCOV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFDBG := -O0 -ggdb -D_DEBUG
+CFPROF := -O0 -ggdb -DPROF -pg
+CFREL := -O2 -DNDEBUG -finline-functions
+CFTSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+CFBARE := -ffreestanding -nodefaultlibs -nostdinc -nostdlib
+
+AR := ar
+AFLAGS := -rcs
+
+LD := gcc
+LFBASE := -pthread -shared -std=gnu89 -Wl,-soname,$(SONAME) -o $(SOPATHNAME) -Wall -Werror
+LFCOV := -O0 -fprofile-arcs -ftest-coverage
+LFDBG := -O0 -ggdb
+LFPROF := -O0 -pg
+LFREL := -O2 -s -finline-functions
+LFTSAN := -O0 -fsanitize=thread -fPIC
+LFBARE := -ffreestanding -nodefaultlibs -nostdinc -nostdlib
+
+##### CPU variants #####
+GCCARCH := native
+CFBASE += -march=$(GCCARCH)
+
+##### build variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CFBASE += -fPIC
+endif
+
+CFLAGS += $(CFBASE)
+LFLAGS += $(LFBASE)
+
+ifeq ($(MAKECMDGOALS),)
+ CFLAGS += $(CFDBG)
+ LFLAGS += $(LFDBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS += $(CFCOV)
+ LFLAGS += $(LFCOV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS += $(CFDBG)
+ LFLAGS += $(LFDBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS += $(CFPROF)
+ LFLAGS += $(LFPROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS += $(CFREL)
+ LFLAGS += $(LFREL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS += $(CFTSAN)
+ LFLAGS += $(LFTSAN)
+endif
+
+ifeq ($(findstring bare,$(MAKECMDGOALS)),bare)
+ CFLAGS += $(CFBARE)
+ LFLAGS += $(LFBARE)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
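+# TRD : for every object file, first emit its dependency file with gcc -MM, then compile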
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS) -o $@ $<
+
+##### explicit rules #####
+$(ARPATHNAME) : $(OBJECTS)
+ $(AR) $(AFLAGS) $(ARPATHNAME) $(OBJECTS)
+
+$(SOPATHNAME) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(OBJECTS) -o $(SOPATHNAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SONAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SOBASENAME)
+
+##### phony #####
+.PHONY : clean bare_ar_cov bare_ar_dbg bare_ar_prof bare_ar_rel bare_ar_tsan bare_so_cov bare_so_dbg bare_so_prof bare_so_rel bare_so_tsan hosted_ar_cov hosted_ar_dbg hosted_ar_prof hosted_ar_rel hosted_ar_tsan hosted_so_cov hosted_so_dbg hosted_so_prof hosted_so_rel hosted_so_tsan
+
+clean :
+ @rm -f $(BINDIR)/* $(OBJDIR)/*
+
+bare_ar_cov : $(ARPATHNAME) # bare, archive (.a), coverage
+bare_ar_dbg : $(ARPATHNAME) # bare, archive (.a), debug
+bare_ar_prof : $(ARPATHNAME) # bare, archive (.a), profiling
+bare_ar_rel : $(ARPATHNAME) # bare, archive (.a), release
+bare_ar_tsan : $(ARPATHNAME) # bare, archive (.a), thread sanitizer
+
+bare_so_cov : $(SOPATHNAME) # bare, shared (.so), coverage
+bare_so_dbg : $(SOPATHNAME) # bare, shared (.so), debug
+bare_so_prof : $(SOPATHNAME) # bare, shared (.so), profiling
+bare_so_rel : $(SOPATHNAME) # bare, shared (.so), release
+bare_so_tsan : $(SOPATHNAME) # bare, shared (.so), thread sanitizer
+
+hosted_ar_cov : $(ARPATHNAME) # hosted implementation, archive (.a), coverage
+hosted_ar_dbg : $(ARPATHNAME) # hosted implementation, archive (.a), debug
+hosted_ar_prof : $(ARPATHNAME) # hosted implementation, archive (.a), profiling
+hosted_ar_rel : $(ARPATHNAME) # hosted implementation, archive (.a), release
+hosted_ar_tsan : $(ARPATHNAME) # hosted implementation, archive (.a), thread sanitizer
+
+hosted_so_cov : $(SOPATHNAME) # hosted implementation, shared (.so), coverage
+hosted_so_dbg : $(SOPATHNAME) # hosted implementation, shared (.so), debug
+hosted_so_prof : $(SOPATHNAME) # hosted implementation, shared (.so), profiling
+hosted_so_rel : $(SOPATHNAME) # hosted implementation, shared (.so), release
+hosted_so_tsan : $(SOPATHNAME) # hosted implementation, shared (.so), thread sanitizer
+
+##### dependencies #####
+-include $(DEPENDS)
+
+##### notes #####
+# TRD : we use -std=gnu89 for C++ style comments
+# hosted implementation differs from bare simply in that <assert.h> ends up being included
+
--- /dev/null
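+# TRD : kernel Kbuild file; lists every liblfds700 object file and the compiler flags for the kernel build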
+lib-y :=
+
+lib-y += ../../src/lfds700_btree_addonly_unbalanced/lfds700_btree_addonly_unbalanced_cleanup.o
+lib-y += ../../src/lfds700_btree_addonly_unbalanced/lfds700_btree_addonly_unbalanced_get.o
+lib-y += ../../src/lfds700_btree_addonly_unbalanced/lfds700_btree_addonly_unbalanced_init.o
+lib-y += ../../src/lfds700_btree_addonly_unbalanced/lfds700_btree_addonly_unbalanced_insert.o
+lib-y += ../../src/lfds700_btree_addonly_unbalanced/lfds700_btree_addonly_unbalanced_query.o
+
+lib-y += ../../src/lfds700_freelist/lfds700_freelist_cleanup.o
+lib-y += ../../src/lfds700_freelist/lfds700_freelist_init.o
+lib-y += ../../src/lfds700_freelist/lfds700_freelist_pop.o
+lib-y += ../../src/lfds700_freelist/lfds700_freelist_push.o
+lib-y += ../../src/lfds700_freelist/lfds700_freelist_query.o
+
+lib-y += ../../src/lfds700_hash_addonly/lfds700_hash_addonly_cleanup.o
+lib-y += ../../src/lfds700_hash_addonly/lfds700_hash_addonly_get.o
+lib-y += ../../src/lfds700_hash_addonly/lfds700_hash_addonly_init.o
+lib-y += ../../src/lfds700_hash_addonly/lfds700_hash_addonly_insert.o
+lib-y += ../../src/lfds700_hash_addonly/lfds700_hash_addonly_iterate.o
+lib-y += ../../src/lfds700_hash_addonly/lfds700_hash_addonly_query.o
+
+lib-y += ../../src/lfds700_list_addonly_ordered_singlylinked/lfds700_list_addonly_ordered_singlylinked_cleanup.o
+lib-y += ../../src/lfds700_list_addonly_ordered_singlylinked/lfds700_list_addonly_ordered_singlylinked_get.o
+lib-y += ../../src/lfds700_list_addonly_ordered_singlylinked/lfds700_list_addonly_ordered_singlylinked_init.o
+lib-y += ../../src/lfds700_list_addonly_ordered_singlylinked/lfds700_list_addonly_ordered_singlylinked_insert.o
+lib-y += ../../src/lfds700_list_addonly_ordered_singlylinked/lfds700_list_addonly_ordered_singlylinked_query.o
+
+lib-y += ../../src/lfds700_list_addonly_singlylinked_unordered/lfds700_list_addonly_singlylinked_unordered_cleanup.o
+lib-y += ../../src/lfds700_list_addonly_singlylinked_unordered/lfds700_list_addonly_singlylinked_unordered_get.o
+lib-y += ../../src/lfds700_list_addonly_singlylinked_unordered/lfds700_list_addonly_singlylinked_unordered_init.o
+lib-y += ../../src/lfds700_list_addonly_singlylinked_unordered/lfds700_list_addonly_singlylinked_unordered_insert.o
+lib-y += ../../src/lfds700_list_addonly_singlylinked_unordered/lfds700_list_addonly_singlylinked_unordered_query.o
+
+lib-y += ../../src/lfds700_misc/lfds700_misc_cleanup.o
+lib-y += ../../src/lfds700_misc/lfds700_misc_globals.o
+lib-y += ../../src/lfds700_misc/lfds700_misc_init.o
+lib-y += ../../src/lfds700_misc/lfds700_misc_prng.o
+lib-y += ../../src/lfds700_misc/lfds700_misc_query.o
+
+lib-y += ../../src/lfds700_queue/lfds700_queue_cleanup.o
+lib-y += ../../src/lfds700_queue/lfds700_queue_dequeue.o
+lib-y += ../../src/lfds700_queue/lfds700_queue_enqueue.o
+lib-y += ../../src/lfds700_queue/lfds700_queue_init.o
+lib-y += ../../src/lfds700_queue/lfds700_queue_query.o
+
+lib-y += ../../src/lfds700_queue_bounded_singleconsumer_singleproducer/lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.o
+lib-y += ../../src/lfds700_queue_bounded_singleconsumer_singleproducer/lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.o
+lib-y += ../../src/lfds700_queue_bounded_singleconsumer_singleproducer/lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.o
+lib-y += ../../src/lfds700_queue_bounded_singleconsumer_singleproducer/lfds700_queue_bounded_singleconsumer_singleproducer_init.o
+lib-y += ../../src/lfds700_queue_bounded_singleconsumer_singleproducer/lfds700_queue_bounded_singleconsumer_singleproducer_query.o
+
+lib-y += ../../src/lfds700_ringbuffer/lfds700_ringbuffer_cleanup.o
+lib-y += ../../src/lfds700_ringbuffer/lfds700_ringbuffer_init.o
+lib-y += ../../src/lfds700_ringbuffer/lfds700_ringbuffer_query.o
+lib-y += ../../src/lfds700_ringbuffer/lfds700_ringbuffer_read.o
+lib-y += ../../src/lfds700_ringbuffer/lfds700_ringbuffer_write.o
+
+lib-y += ../../src/lfds700_stack/lfds700_stack_cleanup.o
+lib-y += ../../src/lfds700_stack/lfds700_stack_init.o
+lib-y += ../../src/lfds700_stack/lfds700_stack_pop.o
+lib-y += ../../src/lfds700_stack/lfds700_stack_push.o
+lib-y += ../../src/lfds700_stack/lfds700_stack_query.o
+
+libs-y := ../../bin/
+
+ccflags-y := -I$(src)/../../inc
+ccflags-y += -I$(src)/../../inc/liblfds700
+ccflags-y += -D_KERNEL_MODE
+ccflags-y += -fno-strict-aliasing
+ccflags-y += -std=gnu89
+ccflags-y += -Wall
+ccflags-y += -Werror
+ccflags-y += -Wno-unknown-pragmas
+ccflags-y += -Wno-unused-but-set-variable
+ccflags-y += -Wno-unused-variable
+
--- /dev/null
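+# TRD : out-of-tree build; each target invokes the running kernel's build system with M= set to this directory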
+default:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD)
+
+clean:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) clean
+ find ../../src/ -name "*.o" -type f -delete
+
+help:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) help
+
+modules:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) modules
+
+
--- /dev/null
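+; TRD : module definition file; lists every liblfds700 function to be exported when building a DLL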
+EXPORTS
+
+lfds700_btree_au_init_valid_on_current_logical_core = lfds700_btree_au_init_valid_on_current_logical_core
+lfds700_btree_au_cleanup = lfds700_btree_au_cleanup
+lfds700_btree_au_insert = lfds700_btree_au_insert
+lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds700_btree_au_get_by_absolute_position = lfds700_btree_au_get_by_absolute_position
+lfds700_btree_au_get_by_relative_position = lfds700_btree_au_get_by_relative_position
+lfds700_btree_au_get_by_key = lfds700_btree_au_get_by_key
+lfds700_btree_au_query = lfds700_btree_au_query
+
+lfds700_freelist_init_valid_on_current_logical_core = lfds700_freelist_init_valid_on_current_logical_core
+lfds700_freelist_cleanup = lfds700_freelist_cleanup
+lfds700_freelist_push = lfds700_freelist_push
+lfds700_freelist_pop = lfds700_freelist_pop
+lfds700_freelist_query = lfds700_freelist_query
+
+lfds700_hash_a_init_valid_on_current_logical_core = lfds700_hash_a_init_valid_on_current_logical_core
+lfds700_hash_a_cleanup = lfds700_hash_a_cleanup
+lfds700_hash_a_insert = lfds700_hash_a_insert
+lfds700_hash_a_get_by_key = lfds700_hash_a_get_by_key
+lfds700_hash_a_iterate_init = lfds700_hash_a_iterate_init
+lfds700_hash_a_iterate = lfds700_hash_a_iterate
+lfds700_hash_a_query = lfds700_hash_a_query
+
+lfds700_list_aos_init_valid_on_current_logical_core = lfds700_list_aos_init_valid_on_current_logical_core
+lfds700_list_aos_cleanup = lfds700_list_aos_cleanup
+lfds700_list_aos_insert = lfds700_list_aos_insert
+lfds700_list_aos_get_by_key = lfds700_list_aos_get_by_key
+lfds700_list_aos_query = lfds700_list_aos_query
+
+lfds700_list_asu_init_valid_on_current_logical_core = lfds700_list_asu_init_valid_on_current_logical_core
+lfds700_list_asu_cleanup = lfds700_list_asu_cleanup
+lfds700_list_asu_insert_at_position = lfds700_list_asu_insert_at_position
+lfds700_list_asu_insert_at_start = lfds700_list_asu_insert_at_start
+lfds700_list_asu_insert_at_end = lfds700_list_asu_insert_at_end
+lfds700_list_asu_insert_after_element = lfds700_list_asu_insert_after_element
+lfds700_list_asu_get_by_key = lfds700_list_asu_get_by_key
+lfds700_list_asu_query = lfds700_list_asu_query
+
+lfds700_misc_library_init_valid_on_current_logical_core = lfds700_misc_library_init_valid_on_current_logical_core
+lfds700_misc_library_cleanup = lfds700_misc_library_cleanup
+lfds700_misc_prng_init = lfds700_misc_prng_init
+lfds700_misc_query = lfds700_misc_query
+
+lfds700_queue_init_valid_on_current_logical_core = lfds700_queue_init_valid_on_current_logical_core
+lfds700_queue_cleanup = lfds700_queue_cleanup
+lfds700_queue_enqueue = lfds700_queue_enqueue
+lfds700_queue_dequeue = lfds700_queue_dequeue
+lfds700_queue_query = lfds700_queue_query
+
+lfds700_queue_bss_init_valid_on_current_logical_core = lfds700_queue_bss_init_valid_on_current_logical_core
+lfds700_queue_bss_cleanup = lfds700_queue_bss_cleanup
+lfds700_queue_bss_enqueue = lfds700_queue_bss_enqueue
+lfds700_queue_bss_dequeue = lfds700_queue_bss_dequeue
+lfds700_queue_bss_query = lfds700_queue_bss_query
+
+lfds700_ringbuffer_init_valid_on_current_logical_core = lfds700_ringbuffer_init_valid_on_current_logical_core
+lfds700_ringbuffer_cleanup = lfds700_ringbuffer_cleanup
+lfds700_ringbuffer_read = lfds700_ringbuffer_read
+lfds700_ringbuffer_write = lfds700_ringbuffer_write
+lfds700_ringbuffer_query = lfds700_ringbuffer_query
+
+lfds700_stack_init_valid_on_current_logical_core = lfds700_stack_init_valid_on_current_logical_core
+lfds700_stack_cleanup = lfds700_stack_cleanup
+lfds700_stack_push = lfds700_stack_push
+lfds700_stack_pop = lfds700_stack_pop
+lfds700_stack_query = lfds700_stack_query
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+INCDIR := ..\..\inc
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := liblfds700
+LIB_BINARY := $(BINDIR)\$(BINNAME).lib
+DLL_BINARY := $(BINDIR)\$(BINNAME).dll
+SRCDIRS := lfds700_btree_addonly_unbalanced lfds700_freelist lfds700_hash_addonly lfds700_list_addonly_ordered_singlylinked lfds700_list_addonly_singlylinked_unordered lfds700_misc lfds700_queue lfds700_queue_bounded_singleconsumer_singleproducer lfds700_ringbuffer lfds700_stack
+SOURCES := lfds700_hash_addonly_cleanup.c lfds700_hash_addonly_get.c lfds700_hash_addonly_init.c lfds700_hash_addonly_insert.c lfds700_hash_addonly_iterate.c lfds700_hash_addonly_query.c \
+ lfds700_list_addonly_ordered_singlylinked_cleanup.c lfds700_list_addonly_ordered_singlylinked_get.c lfds700_list_addonly_ordered_singlylinked_init.c lfds700_list_addonly_ordered_singlylinked_insert.c lfds700_list_addonly_ordered_singlylinked_query.c \
+ lfds700_list_addonly_singlylinked_unordered_cleanup.c lfds700_list_addonly_singlylinked_unordered_get.c lfds700_list_addonly_singlylinked_unordered_init.c lfds700_list_addonly_singlylinked_unordered_insert.c lfds700_list_addonly_singlylinked_unordered_query.c \
+ lfds700_btree_addonly_unbalanced_cleanup.c lfds700_btree_addonly_unbalanced_get.c lfds700_btree_addonly_unbalanced_init.c lfds700_btree_addonly_unbalanced_insert.c lfds700_btree_addonly_unbalanced_query.c \
+ lfds700_freelist_cleanup.c lfds700_freelist_init.c lfds700_freelist_pop.c lfds700_freelist_push.c lfds700_freelist_query.c \
+ lfds700_misc_cleanup.c lfds700_misc_globals.c lfds700_misc_init.c lfds700_misc_prng.c lfds700_misc_query.c \
+ lfds700_queue_cleanup.c lfds700_queue_dequeue.c lfds700_queue_enqueue.c lfds700_queue_init.c lfds700_queue_query.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c lfds700_queue_bounded_singleconsumer_singleproducer_init.c lfds700_queue_bounded_singleconsumer_singleproducer_query.c \
+ lfds700_ringbuffer_cleanup.c lfds700_ringbuffer_init.c lfds700_ringbuffer_query.c lfds700_ringbuffer_read.c lfds700_ringbuffer_write.c \
+ lfds700_stack_cleanup.c lfds700_stack_init.c lfds700_stack_pop.c lfds700_stack_push.c lfds700_stack_query.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
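+# objects are built flat into OBJDIR from sources spread across the per-structure
+# directories in SRCDIRS; the vpath directive below tells make where to find each .c file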
+
+##### default paths fix up #####
+INCDIRS := $(patsubst %,%;,$(INCDIR))
+INCLUDE += $(subst $(SPACE),,$(INCDIRS))
+
+##### tools #####
+MAKE := make
+MFLAGS :=
+
+CC := cl
+CBASE := /c "-I$(SRCDIR)" "/Fd$(BINDIR)\$(BINNAME).pdb" /D_CRT_SECURE_NO_WARNINGS /DWIN32_LEAN_AND_MEAN /DUNICODE /D_UNICODE /nologo /W4 /wd 4068 /WX
+CFREL := /DNDEBUG /Ox
+CFDBG := /D_DEBUG /Gm /Od /Zi
+
+AR := lib
+AFLAGS := /nologo /subsystem:console /verbose /wx
+
+LD := link
+LFBASE := /def:$(BINNAME).def /dll /nodefaultlib /nologo /nxcompat /subsystem:console /wx
+LFREL := /incremental:no
+LFDBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+
+##### variants #####
+CFLAGS := $(CBASE) $(CFDBG) /MTd
+ASFLAGS := $(ASBASE) $(ASDBG)
+LFLAGS := $(LFBASE) $(LFDBG)
+CLIB := libcmtd.lib
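+# the debug static library (libdbg) settings are the default; the goal-specific
+# blocks below switch the compiler flags, linker flags and C runtime library to
+# match the chosen target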
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS := $(CBASE) $(CFREL) /MT
+ ASFLAGS := $(ASBASE) $(ASREL)
+ LFLAGS := $(LFBASE) $(LFREL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS := $(CBASE) $(CFDBG) /MTd
+ ASFLAGS := $(ASBASE) $(ASDBG)
+ LFLAGS := $(LFBASE) $(LFDBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS := $(CBASE) $(CFREL) /MD
+ ASFLAGS := $(ASBASE) $(ASREL)
+ LFLAGS := $(LFBASE) $(LFREL)
+ CLIB := msvcrt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS := $(CBASE) $(CFDBG) /MDd
+ ASFLAGS := $(ASBASE) $(ASDBG)
+ LFLAGS := $(LFBASE) $(LFDBG)
+ CLIB := msvcrtd.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(AFLAGS) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+dlldbg : $(DLL_BINARY)
+dllrel : $(DLL_BINARY)
+
+libdbg : $(LIB_BINARY)
+librel : $(LIB_BINARY)
+
+##### notes #####
+# /wd 4068 : turn off "unknown pragma" warning
+
+
--- /dev/null
+EXPORTS
+
+lfds700_btree_au_init_valid_on_current_logical_core = lfds700_btree_au_init_valid_on_current_logical_core
+lfds700_btree_au_cleanup = lfds700_btree_au_cleanup
+lfds700_btree_au_insert = lfds700_btree_au_insert
+lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds700_btree_au_get_by_absolute_position = lfds700_btree_au_get_by_absolute_position
+lfds700_btree_au_get_by_relative_position = lfds700_btree_au_get_by_relative_position
+lfds700_btree_au_get_by_key = lfds700_btree_au_get_by_key
+lfds700_btree_au_query = lfds700_btree_au_query
+
+lfds700_freelist_init_valid_on_current_logical_core = lfds700_freelist_init_valid_on_current_logical_core
+lfds700_freelist_cleanup = lfds700_freelist_cleanup
+lfds700_freelist_push = lfds700_freelist_push
+lfds700_freelist_pop = lfds700_freelist_pop
+lfds700_freelist_query = lfds700_freelist_query
+
+lfds700_hash_a_init_valid_on_current_logical_core = lfds700_hash_a_init_valid_on_current_logical_core
+lfds700_hash_a_cleanup = lfds700_hash_a_cleanup
+lfds700_hash_a_insert = lfds700_hash_a_insert
+lfds700_hash_a_get_by_key = lfds700_hash_a_get_by_key
+lfds700_hash_a_iterate_init = lfds700_hash_a_iterate_init
+lfds700_hash_a_iterate = lfds700_hash_a_iterate
+lfds700_hash_a_query = lfds700_hash_a_query
+
+lfds700_list_aos_init_valid_on_current_logical_core = lfds700_list_aos_init_valid_on_current_logical_core
+lfds700_list_aos_cleanup = lfds700_list_aos_cleanup
+lfds700_list_aos_insert = lfds700_list_aos_insert
+lfds700_list_aos_get_by_key = lfds700_list_aos_get_by_key
+lfds700_list_aos_query = lfds700_list_aos_query
+
+lfds700_list_asu_init_valid_on_current_logical_core = lfds700_list_asu_init_valid_on_current_logical_core
+lfds700_list_asu_cleanup = lfds700_list_asu_cleanup
+lfds700_list_asu_insert_at_position = lfds700_list_asu_insert_at_position
+lfds700_list_asu_insert_at_start = lfds700_list_asu_insert_at_start
+lfds700_list_asu_insert_at_end = lfds700_list_asu_insert_at_end
+lfds700_list_asu_insert_after_element = lfds700_list_asu_insert_after_element
+lfds700_list_asu_get_by_key = lfds700_list_asu_get_by_key
+lfds700_list_asu_query = lfds700_list_asu_query
+
+lfds700_misc_library_init_valid_on_current_logical_core = lfds700_misc_library_init_valid_on_current_logical_core
+lfds700_misc_library_cleanup = lfds700_misc_library_cleanup
+lfds700_misc_prng_init = lfds700_misc_prng_init
+lfds700_misc_query = lfds700_misc_query
+
+lfds700_queue_init_valid_on_current_logical_core = lfds700_queue_init_valid_on_current_logical_core
+lfds700_queue_cleanup = lfds700_queue_cleanup
+lfds700_queue_enqueue = lfds700_queue_enqueue
+lfds700_queue_dequeue = lfds700_queue_dequeue
+lfds700_queue_query = lfds700_queue_query
+
+lfds700_queue_bss_init_valid_on_current_logical_core = lfds700_queue_bss_init_valid_on_current_logical_core
+lfds700_queue_bss_cleanup = lfds700_queue_bss_cleanup
+lfds700_queue_bss_enqueue = lfds700_queue_bss_enqueue
+lfds700_queue_bss_dequeue = lfds700_queue_bss_dequeue
+lfds700_queue_bss_query = lfds700_queue_bss_query
+
+lfds700_ringbuffer_init_valid_on_current_logical_core = lfds700_ringbuffer_init_valid_on_current_logical_core
+lfds700_ringbuffer_cleanup = lfds700_ringbuffer_cleanup
+lfds700_ringbuffer_read = lfds700_ringbuffer_read
+lfds700_ringbuffer_write = lfds700_ringbuffer_write
+lfds700_ringbuffer_query = lfds700_ringbuffer_query
+
+lfds700_stack_init_valid_on_current_logical_core = lfds700_stack_init_valid_on_current_logical_core
+lfds700_stack_cleanup = lfds700_stack_cleanup
+lfds700_stack_push = lfds700_stack_push
+lfds700_stack_pop = lfds700_stack_pop
+lfds700_stack_query = lfds700_stack_query
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio 2012\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds700", "liblfds700.vcxproj", "{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug LIB|Win32 = Debug LIB|Win32\r
+ Debug LIB|x64 = Debug LIB|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release LIB|Win32 = Release LIB|Win32\r
+ Release LIB|x64 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Deploy.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Deploy.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.ActiveCfg = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Build.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Deploy.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.ActiveCfg = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Build.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Deploy.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Deploy.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Deploy.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.ActiveCfg = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Build.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Deploy.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.ActiveCfg = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Build.0 = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Deploy.0 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug DLL|Win32">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug DLL|x64">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|Win32">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|x64">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|Win32">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|x64">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|Win32">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|x64">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}</ProjectGuid>\r
+ <Keyword>Win32Proj</Keyword>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <OptimizeReferences>false</OptimizeReferences>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\inc\liblfds700.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_btree_addonly_unbalanced.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_freelist.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_hash_addonly.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_ordered_singlylinked.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_singlylinked_unordered.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_misc.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_compiler.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_operating_system.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_processor.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue_bounded_singleconsumer_singleproducer.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_ringbuffer.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_stack.h" />\r
+ <ClInclude Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_freelist\lfds700_freelist_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_misc\lfds700_misc_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_queue\lfds700_queue_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_stack\lfds700_stack_internal.h" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_pop.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_push.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_iterate.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_globals.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_prng.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_dequeue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_enqueue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_read.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_write.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_pop.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_push.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_query.c" />\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup>\r
+ <Filter Include="Source Files">\r
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\r
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files">\r
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>\r
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>\r
+ </Filter>\r
+ <Filter Include="Resource Files">\r
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files\liblfds700">\r
+ <UniqueIdentifier>{258be429-7dac-4999-b995-753aa2f0c505}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_freelist">\r
+ <UniqueIdentifier>{469abf8e-47d8-4678-bd66-7c7e65c5f52e}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_ringbuffer">\r
+ <UniqueIdentifier>{62ee141b-2acb-4555-b016-7be20a57f2bf}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_stack">\r
+ <UniqueIdentifier>{19c73b0f-25e0-4166-9093-427f1dfb4f70}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_queue">\r
+ <UniqueIdentifier>{00eb30fe-e638-4c2b-8ca1-1f09c4a0ed45}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_misc">\r
+ <UniqueIdentifier>{400ae4e9-2281-4549-b918-59d1a27a2d07}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_btree_addonly_unbalanced">\r
+ <UniqueIdentifier>{0b1fafc3-817b-4c18-8eb1-121884e3a29b}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_hash_addonly">\r
+ <UniqueIdentifier>{bcbadc74-1748-4696-aad7-7fdbe5614624}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_list_addonly_ordered_singlylinked">\r
+ <UniqueIdentifier>{c45194af-7b41-4c28-bc0e-1095ec347664}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_queue_bounded_singleconsumer_singleproducer">\r
+ <UniqueIdentifier>{6250c4d5-ac8e-4c28-93de-0954c5bed1cb}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_list_addonly_singlylinked_unordered">\r
+ <UniqueIdentifier>{3ac93721-1d81-49e4-9581-dbc12ace5c0c}</UniqueIdentifier>\r
+ </Filter>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\inc\liblfds700.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_freelist\lfds700_freelist_internal.h">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_queue\lfds700_queue_internal.h">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_stack\lfds700_stack_internal.h">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_btree_addonly_unbalanced.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_freelist.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_hash_addonly.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_ordered_singlylinked.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_singlylinked_unordered.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_misc.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_compiler.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_operating_system.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_processor.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue_bounded_singleconsumer_singleproducer.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_ringbuffer.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_stack.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_internal.h">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_misc\lfds700_misc_internal.h">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_internal.h">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_internal.h">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_internal.h">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_internal.h">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_internal.h">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClInclude>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_cleanup.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_init.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_pop.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_push.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_query.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_cleanup.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_dequeue.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_enqueue.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_init.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_query.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_cleanup.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_init.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_pop.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_push.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_query.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_cleanup.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_get.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_init.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_insert.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_iterate.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_query.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_cleanup.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_globals.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_init.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_prng.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_query.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_cleanup.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_get.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_init.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_insert.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_query.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_cleanup.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_init.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_query.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_read.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_write.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_init.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_query.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_cleanup.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_get.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_init.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_insert.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_query.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_cleanup.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_get.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_init.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_insert.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_query.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ </ItemGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <PropertyGroup />\r
+</Project>
\ No newline at end of file
--- /dev/null
+#include <wdf.h>
--- /dev/null
+EXPORTS
+
+lfds700_btree_au_init_valid_on_current_logical_core = lfds700_btree_au_init_valid_on_current_logical_core
+lfds700_btree_au_cleanup = lfds700_btree_au_cleanup
+lfds700_btree_au_insert = lfds700_btree_au_insert
+lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds700_btree_au_get_by_absolute_position = lfds700_btree_au_get_by_absolute_position
+lfds700_btree_au_get_by_relative_position = lfds700_btree_au_get_by_relative_position
+lfds700_btree_au_get_by_key = lfds700_btree_au_get_by_key
+lfds700_btree_au_query = lfds700_btree_au_query
+
+lfds700_freelist_init_valid_on_current_logical_core = lfds700_freelist_init_valid_on_current_logical_core
+lfds700_freelist_cleanup = lfds700_freelist_cleanup
+lfds700_freelist_push = lfds700_freelist_push
+lfds700_freelist_pop = lfds700_freelist_pop
+lfds700_freelist_query = lfds700_freelist_query
+
+lfds700_hash_a_init_valid_on_current_logical_core = lfds700_hash_a_init_valid_on_current_logical_core
+lfds700_hash_a_cleanup = lfds700_hash_a_cleanup
+lfds700_hash_a_insert = lfds700_hash_a_insert
+lfds700_hash_a_get_by_key = lfds700_hash_a_get_by_key
+lfds700_hash_a_iterate_init = lfds700_hash_a_iterate_init
+lfds700_hash_a_iterate = lfds700_hash_a_iterate
+lfds700_hash_a_query = lfds700_hash_a_query
+
+lfds700_list_aos_init_valid_on_current_logical_core = lfds700_list_aos_init_valid_on_current_logical_core
+lfds700_list_aos_cleanup = lfds700_list_aos_cleanup
+lfds700_list_aos_insert = lfds700_list_aos_insert
+lfds700_list_aos_get_by_key = lfds700_list_aos_get_by_key
+lfds700_list_aos_query = lfds700_list_aos_query
+
+lfds700_list_asu_init_valid_on_current_logical_core = lfds700_list_asu_init_valid_on_current_logical_core
+lfds700_list_asu_cleanup = lfds700_list_asu_cleanup
+lfds700_list_asu_insert_at_position = lfds700_list_asu_insert_at_position
+lfds700_list_asu_insert_at_start = lfds700_list_asu_insert_at_start
+lfds700_list_asu_insert_at_end = lfds700_list_asu_insert_at_end
+lfds700_list_asu_insert_after_element = lfds700_list_asu_insert_after_element
+lfds700_list_asu_get_by_key = lfds700_list_asu_get_by_key
+lfds700_list_asu_query = lfds700_list_asu_query
+
+lfds700_misc_library_init_valid_on_current_logical_core = lfds700_misc_library_init_valid_on_current_logical_core
+lfds700_misc_library_cleanup = lfds700_misc_library_cleanup
+lfds700_misc_prng_init = lfds700_misc_prng_init
+lfds700_misc_query = lfds700_misc_query
+
+lfds700_queue_init_valid_on_current_logical_core = lfds700_queue_init_valid_on_current_logical_core
+lfds700_queue_cleanup = lfds700_queue_cleanup
+lfds700_queue_enqueue = lfds700_queue_enqueue
+lfds700_queue_dequeue = lfds700_queue_dequeue
+lfds700_queue_query = lfds700_queue_query
+
+lfds700_queue_bss_init_valid_on_current_logical_core = lfds700_queue_bss_init_valid_on_current_logical_core
+lfds700_queue_bss_cleanup = lfds700_queue_bss_cleanup
+lfds700_queue_bss_enqueue = lfds700_queue_bss_enqueue
+lfds700_queue_bss_dequeue = lfds700_queue_bss_dequeue
+lfds700_queue_bss_query = lfds700_queue_bss_query
+
+lfds700_ringbuffer_init_valid_on_current_logical_core = lfds700_ringbuffer_init_valid_on_current_logical_core
+lfds700_ringbuffer_cleanup = lfds700_ringbuffer_cleanup
+lfds700_ringbuffer_read = lfds700_ringbuffer_read
+lfds700_ringbuffer_write = lfds700_ringbuffer_write
+lfds700_ringbuffer_query = lfds700_ringbuffer_query
+
+lfds700_stack_init_valid_on_current_logical_core = lfds700_stack_init_valid_on_current_logical_core
+lfds700_stack_cleanup = lfds700_stack_cleanup
+lfds700_stack_push = lfds700_stack_push
+lfds700_stack_pop = lfds700_stack_pop
+lfds700_stack_query = lfds700_stack_query
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio 2012\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds700", "liblfds700.vcxproj", "{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug LIB|Win32 = Debug LIB|Win32\r
+ Debug LIB|x64 = Debug LIB|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release LIB|Win32 = Release LIB|Win32\r
+ Release LIB|x64 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Deploy.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Deploy.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.ActiveCfg = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Build.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Deploy.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.ActiveCfg = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Build.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Deploy.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Deploy.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Deploy.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.ActiveCfg = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Build.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Deploy.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.ActiveCfg = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Build.0 = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Deploy.0 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug DLL|Win32">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug DLL|x64">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|Win32">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|x64">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|Win32">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|x64">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|Win32">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|x64">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}</ProjectGuid>\r
+ <Keyword>Win32Proj</Keyword>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.0</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath);</LibraryPath>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <OptimizeReferences>false</OptimizeReferences>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\inc\liblfds700.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_btree_addonly_unbalanced.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_freelist.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_hash_addonly.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_ordered_singlylinked.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_singlylinked_unordered.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_misc.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_compiler.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_operating_system.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_processor.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue_bounded_singleconsumer_singleproducer.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_ringbuffer.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_stack.h" />\r
+ <ClInclude Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_freelist\lfds700_freelist_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_misc\lfds700_misc_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_queue\lfds700_queue_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_stack\lfds700_stack_internal.h" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_pop.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_push.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_iterate.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_globals.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_prng.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_dequeue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_enqueue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_read.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_write.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_pop.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_push.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_query.c" />\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup>\r
+ <Filter Include="Source Files">\r
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\r
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files">\r
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>\r
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>\r
+ </Filter>\r
+ <Filter Include="Resource Files">\r
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files\liblfds700">\r
+ <UniqueIdentifier>{258be429-7dac-4999-b995-753aa2f0c505}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_freelist">\r
+ <UniqueIdentifier>{469abf8e-47d8-4678-bd66-7c7e65c5f52e}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_ringbuffer">\r
+ <UniqueIdentifier>{62ee141b-2acb-4555-b016-7be20a57f2bf}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_stack">\r
+ <UniqueIdentifier>{19c73b0f-25e0-4166-9093-427f1dfb4f70}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_queue">\r
+ <UniqueIdentifier>{00eb30fe-e638-4c2b-8ca1-1f09c4a0ed45}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_queue_bounded_singleconsumer_singleproducer">\r
+ <UniqueIdentifier>{6250c4d5-ac8e-4c28-93de-0954c5bed1cb}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_misc">\r
+ <UniqueIdentifier>{400ae4e9-2281-4549-b918-59d1a27a2d07}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_list_addonly_ordered_singlylinked">\r
+ <UniqueIdentifier>{c45194af-7b41-4c28-bc0e-1095ec347664}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_list_addonly_singlylinked_unordered">\r
+ <UniqueIdentifier>{8b3cbb5c-7436-429f-9b72-bae1f4721746}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_hash_addonly">\r
+ <UniqueIdentifier>{bcbadc74-1748-4696-aad7-7fdbe5614624}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_btree_addonly_unbalanced">\r
+ <UniqueIdentifier>{0b1fafc3-817b-4c18-8eb1-121884e3a29b}</UniqueIdentifier>\r
+ </Filter>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\inc\liblfds700.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_freelist\lfds700_freelist_internal.h">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_stack\lfds700_stack_internal.h">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_queue\lfds700_queue_internal.h">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_btree_addonly_unbalanced.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_freelist.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_hash_addonly.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_ordered_singlylinked.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_singlylinked_unordered.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_misc.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_compiler.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_operating_system.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_processor.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue_bounded_singleconsumer_singleproducer.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_ringbuffer.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_stack.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_internal.h">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_internal.h">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_internal.h">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_misc\lfds700_misc_internal.h">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_internal.h">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_internal.h">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_internal.h">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClInclude>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_cleanup.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_init.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_pop.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_push.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_query.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_cleanup.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_init.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_pop.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_push.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_query.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_cleanup.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_dequeue.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_enqueue.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_init.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_query.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_cleanup.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_get.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_init.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_insert.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_iterate.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_query.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_cleanup.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_get.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_init.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_insert.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_query.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_cleanup.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_get.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_init.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_insert.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_query.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_cleanup.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_globals.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_init.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_prng.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_query.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_init.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_query.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_cleanup.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_get.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_init.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_insert.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_query.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_cleanup.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_init.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_query.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_read.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_write.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ </ItemGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <PropertyGroup />\r
+</Project>
\ No newline at end of file
--- /dev/null
+#include <wdf.h>
--- /dev/null
+EXPORTS
+
+lfds700_btree_au_init_valid_on_current_logical_core = lfds700_btree_au_init_valid_on_current_logical_core
+lfds700_btree_au_cleanup = lfds700_btree_au_cleanup
+lfds700_btree_au_insert = lfds700_btree_au_insert
+lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds700_btree_au_get_by_absolute_position = lfds700_btree_au_get_by_absolute_position
+lfds700_btree_au_get_by_relative_position = lfds700_btree_au_get_by_relative_position
+lfds700_btree_au_get_by_key = lfds700_btree_au_get_by_key
+lfds700_btree_au_query = lfds700_btree_au_query
+
+lfds700_freelist_init_valid_on_current_logical_core = lfds700_freelist_init_valid_on_current_logical_core
+lfds700_freelist_cleanup = lfds700_freelist_cleanup
+lfds700_freelist_push = lfds700_freelist_push
+lfds700_freelist_pop = lfds700_freelist_pop
+lfds700_freelist_query = lfds700_freelist_query
+
+lfds700_hash_a_init_valid_on_current_logical_core = lfds700_hash_a_init_valid_on_current_logical_core
+lfds700_hash_a_cleanup = lfds700_hash_a_cleanup
+lfds700_hash_a_insert = lfds700_hash_a_insert
+lfds700_hash_a_get_by_key = lfds700_hash_a_get_by_key
+lfds700_hash_a_iterate_init = lfds700_hash_a_iterate_init
+lfds700_hash_a_iterate = lfds700_hash_a_iterate
+lfds700_hash_a_query = lfds700_hash_a_query
+
+lfds700_list_aos_init_valid_on_current_logical_core = lfds700_list_aos_init_valid_on_current_logical_core
+lfds700_list_aos_cleanup = lfds700_list_aos_cleanup
+lfds700_list_aos_insert = lfds700_list_aos_insert
+lfds700_list_aos_get_by_key = lfds700_list_aos_get_by_key
+lfds700_list_aos_query = lfds700_list_aos_query
+
+lfds700_list_asu_init_valid_on_current_logical_core = lfds700_list_asu_init_valid_on_current_logical_core
+lfds700_list_asu_cleanup = lfds700_list_asu_cleanup
+lfds700_list_asu_insert_at_position = lfds700_list_asu_insert_at_position
+lfds700_list_asu_insert_at_start = lfds700_list_asu_insert_at_start
+lfds700_list_asu_insert_at_end = lfds700_list_asu_insert_at_end
+lfds700_list_asu_insert_after_element = lfds700_list_asu_insert_after_element
+lfds700_list_asu_get_by_key = lfds700_list_asu_get_by_key
+lfds700_list_asu_query = lfds700_list_asu_query
+
+lfds700_misc_library_init_valid_on_current_logical_core = lfds700_misc_library_init_valid_on_current_logical_core
+lfds700_misc_library_cleanup = lfds700_misc_library_cleanup
+lfds700_misc_prng_init = lfds700_misc_prng_init
+lfds700_misc_query = lfds700_misc_query
+
+lfds700_queue_init_valid_on_current_logical_core = lfds700_queue_init_valid_on_current_logical_core
+lfds700_queue_cleanup = lfds700_queue_cleanup
+lfds700_queue_enqueue = lfds700_queue_enqueue
+lfds700_queue_dequeue = lfds700_queue_dequeue
+lfds700_queue_query = lfds700_queue_query
+
+lfds700_queue_bss_init_valid_on_current_logical_core = lfds700_queue_bss_init_valid_on_current_logical_core
+lfds700_queue_bss_cleanup = lfds700_queue_bss_cleanup
+lfds700_queue_bss_enqueue = lfds700_queue_bss_enqueue
+lfds700_queue_bss_dequeue = lfds700_queue_bss_dequeue
+lfds700_queue_bss_query = lfds700_queue_bss_query
+
+lfds700_ringbuffer_init_valid_on_current_logical_core = lfds700_ringbuffer_init_valid_on_current_logical_core
+lfds700_ringbuffer_cleanup = lfds700_ringbuffer_cleanup
+lfds700_ringbuffer_read = lfds700_ringbuffer_read
+lfds700_ringbuffer_write = lfds700_ringbuffer_write
+lfds700_ringbuffer_query = lfds700_ringbuffer_query
+
+lfds700_stack_init_valid_on_current_logical_core = lfds700_stack_init_valid_on_current_logical_core
+lfds700_stack_cleanup = lfds700_stack_cleanup
+lfds700_stack_push = lfds700_stack_push
+lfds700_stack_pop = lfds700_stack_pop
+lfds700_stack_query = lfds700_stack_query
+
--- /dev/null
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio 2013\r
+VisualStudioVersion = 12.0.40629.0\r
+MinimumVisualStudioVersion = 10.0.40219.1\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds700", "liblfds700.vcxproj", "{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|ARM = Debug DLL|ARM\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug LIB|ARM = Debug LIB|ARM\r
+ Debug LIB|Win32 = Debug LIB|Win32\r
+ Debug LIB|x64 = Debug LIB|x64\r
+ Release DLL|ARM = Release DLL|ARM\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release LIB|ARM = Release LIB|ARM\r
+ Release LIB|Win32 = Release LIB|Win32\r
+ Release LIB|x64 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|ARM.ActiveCfg = Debug DLL|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|ARM.Build.0 = Debug DLL|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Deploy.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Deploy.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|ARM.ActiveCfg = Debug LIB|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|ARM.Build.0 = Debug LIB|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.ActiveCfg = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Build.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Deploy.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.ActiveCfg = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Build.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Deploy.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|ARM.ActiveCfg = Release DLL|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|ARM.Build.0 = Release DLL|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Deploy.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Deploy.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|ARM.ActiveCfg = Release LIB|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|ARM.Build.0 = Release LIB|ARM\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.ActiveCfg = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Build.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Deploy.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.ActiveCfg = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Build.0 = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Deploy.0 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug DLL|ARM">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>ARM</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug DLL|Win32">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug DLL|x64">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|ARM">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>ARM</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|Win32">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|x64">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|ARM">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>ARM</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|Win32">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|x64">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|ARM">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>ARM</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|Win32">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|x64">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}</ProjectGuid>\r
+ <Keyword>Win32Proj</Keyword>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|ARM'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|ARM'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|ARM'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|ARM'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>StaticLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>DynamicLibrary</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>WindowsKernelModeDriver8.1</PlatformToolset>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|ARM'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|ARM'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|ARM'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|ARM'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(WindowsSDK_LibraryPath_x86);$(WDKContentRoot)lib\wdf\kmdf\x86\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|ARM'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath)</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath)</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(WindowsSDK_LibraryPath_x86);$(WDKContentRoot)lib\wdf\kmdf\x86\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|ARM'">\r
+ <LinkIncremental />\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath)</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath)</LibraryPath>\r
+ <ExcludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(MSBuildToolsPath);$(MSBuildFrameworkToolsPath);$(MSBuild_ExecutablePath);$(VC_LibraryPath_ARM);</ExcludePath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);$(WDKContentRoot)lib\wdf\kmdf\x64\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);$(WDKContentRoot)lib\wdf\kmdf\x64\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(WindowsSDK_LibraryPath_x86);$(WDKContentRoot)lib\wdf\kmdf\x86\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|ARM'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath)</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath)</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib;$(VCInstallDir)atlmfc\lib;$(WindowsSDK_LibraryPath_x86);$(WDKContentRoot)lib\wdf\kmdf\x86\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|ARM'">\r
+ <LinkIncremental />\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath)</IncludePath>\r
+ <LibraryPath>$(WDKContentRoot)lib\$(DDKSpec)\KM\$(DDKPlatform);$(LibraryPath)</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <LinkIncremental>true</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);$(WDKContentRoot)lib\wdf\kmdf\x64\</LibraryPath>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <LinkIncremental>\r
+ </LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ <IncludePath>$(ProjectDir);$(IncludePath);$(WDKContentRoot)include\km\;$(WDKContentRoot)include\wdf\kmdf\1.11\</IncludePath>\r
+ <LibraryPath>$(VCInstallDir)lib\amd64;$(VCInstallDir)atlmfc\lib\amd64;$(WindowsSDK_LibraryPath_x64);$(WDKContentRoot)lib\wdf\kmdf\x64\</LibraryPath>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|ARM'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineARM</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|ARM'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ <Driver>WDM</Driver>\r
+ <IgnoreSpecificDefaultLibraries />\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineARM</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Debug";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <SmallerTypeCheck>true</SmallerTypeCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>true</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <MapExports>true</MapExports>\r
+ <OptimizeReferences>false</OptimizeReferences>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;msvcrtd.lib</AdditionalDependencies>\r
+ <GenerateMapFile>true</GenerateMapFile>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ </Lib>\r
+ <Lib>\r
+ <LinkTimeCodeGeneration>false</LinkTimeCodeGeneration>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|ARM'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineARM</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|ARM'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <Driver>WDM</Driver>\r
+ <IgnoreSpecificDefaultLibraries />\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineARM</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Windows</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Native</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_LIB;LFDS700_BUILD_TYPE_STRING="Release";%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <AdditionalIncludeDirectories>\r
+ </AdditionalIncludeDirectories>\r
+ <RuntimeLibrary>MultiThreadedDLL</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <StringPooling>true</StringPooling>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>\r
+ </FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>\r
+ </EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>\r
+ </EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>\r
+ </CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <UseFullPaths>true</UseFullPaths>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <DisableSpecificWarnings>4068</DisableSpecificWarnings>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <AdditionalOptions>/kernel %(AdditionalOptions)</AdditionalOptions>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Native</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ <ModuleDefinitionFile>liblfds700.def</ModuleDefinitionFile>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;msvcrt.lib</AdditionalDependencies>\r
+ <Driver>WDM</Driver>\r
+ </Link>\r
+ <Lib>\r
+ <TreatLibWarningAsErrors>true</TreatLibWarningAsErrors>\r
+ </Lib>\r
+ <Lib>\r
+ <TargetMachine>MachineX64</TargetMachine>\r
+ </Lib>\r
+ <Lib>\r
+ <SubSystem>Console</SubSystem>\r
+ <IgnoreAllDefaultLibraries>true</IgnoreAllDefaultLibraries>\r
+ </Lib>\r
+ </ItemDefinitionGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\inc\liblfds700.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_btree_addonly_unbalanced.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_freelist.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_hash_addonly.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_ordered_singlylinked.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_singlylinked_unordered.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_misc.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_compiler.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_operating_system.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_processor.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue_bounded_singleconsumer_singleproducer.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_ringbuffer.h" />\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_stack.h" />\r
+ <ClInclude Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_freelist\lfds700_freelist_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_misc\lfds700_misc_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_queue\lfds700_queue_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_internal.h" />\r
+ <ClInclude Include="..\..\src\lfds700_stack\lfds700_stack_internal.h" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_pop.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_push.c" />\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_iterate.c" />\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_get.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_insert.c" />\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_globals.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_prng.c" />\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_dequeue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_enqueue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_query.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_read.c" />\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_write.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_cleanup.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_init.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_pop.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_push.c" />\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_query.c" />\r
+ </ItemGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup>\r
+ <Filter Include="Source Files">\r
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\r
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files">\r
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>\r
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>\r
+ </Filter>\r
+ <Filter Include="Resource Files">\r
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files\liblfds700">\r
+ <UniqueIdentifier>{258be429-7dac-4999-b995-753aa2f0c505}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_freelist">\r
+ <UniqueIdentifier>{469abf8e-47d8-4678-bd66-7c7e65c5f52e}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_ringbuffer">\r
+ <UniqueIdentifier>{62ee141b-2acb-4555-b016-7be20a57f2bf}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_stack">\r
+ <UniqueIdentifier>{19c73b0f-25e0-4166-9093-427f1dfb4f70}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_queue">\r
+ <UniqueIdentifier>{00eb30fe-e638-4c2b-8ca1-1f09c4a0ed45}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_queue_bounded_singleconsumer_singleproducer">\r
+ <UniqueIdentifier>{6250c4d5-ac8e-4c28-93de-0954c5bed1cb}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_misc">\r
+ <UniqueIdentifier>{400ae4e9-2281-4549-b918-59d1a27a2d07}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_list_addonly_ordered_singlylinked">\r
+ <UniqueIdentifier>{c45194af-7b41-4c28-bc0e-1095ec347664}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_list_addonly_singlylinked_unordered">\r
+ <UniqueIdentifier>{8b3cbb5c-7436-429f-9b72-bae1f4721746}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_hash_addonly">\r
+ <UniqueIdentifier>{bcbadc74-1748-4696-aad7-7fdbe5614624}</UniqueIdentifier>\r
+ </Filter>\r
+ <Filter Include="Source Files\lfds700_btree_addonly_unbalanced">\r
+ <UniqueIdentifier>{0b1fafc3-817b-4c18-8eb1-121884e3a29b}</UniqueIdentifier>\r
+ </Filter>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\inc\liblfds700.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_freelist\lfds700_freelist_internal.h">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_stack\lfds700_stack_internal.h">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_queue\lfds700_queue_internal.h">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_btree_addonly_unbalanced.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_freelist.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_hash_addonly.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_ordered_singlylinked.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_list_addonly_singlylinked_unordered.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_misc.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_compiler.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_operating_system.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_porting_abstraction_layer_processor.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_queue_bounded_singleconsumer_singleproducer.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_ringbuffer.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\inc\liblfds700\lfds700_stack.h">\r
+ <Filter>Header Files\liblfds700</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_internal.h">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_internal.h">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_internal.h">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_misc\lfds700_misc_internal.h">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_internal.h">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_internal.h">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_internal.h">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClInclude>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_cleanup.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_init.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_pop.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_push.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_freelist\lfds700_freelist_query.c">\r
+ <Filter>Source Files\lfds700_freelist</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_cleanup.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_init.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_pop.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_push.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_stack\lfds700_stack_query.c">\r
+ <Filter>Source Files\lfds700_stack</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_cleanup.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_dequeue.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_enqueue.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_init.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue\lfds700_queue_query.c">\r
+ <Filter>Source Files\lfds700_queue</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_cleanup.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_get.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_init.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_insert.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_iterate.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_hash_addonly\lfds700_hash_addonly_query.c">\r
+ <Filter>Source Files\lfds700_hash_addonly</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_cleanup.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_get.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_init.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_insert.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_ordered_singlylinked\lfds700_list_addonly_ordered_singlylinked_query.c">\r
+ <Filter>Source Files\lfds700_list_addonly_ordered_singlylinked</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_cleanup.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_get.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_init.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_insert.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_btree_addonly_unbalanced\lfds700_btree_addonly_unbalanced_query.c">\r
+ <Filter>Source Files\lfds700_btree_addonly_unbalanced</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_cleanup.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_globals.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_init.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_prng.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_misc\lfds700_misc_query.c">\r
+ <Filter>Source Files\lfds700_misc</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_init.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\lfds700_queue_bounded_singleconsumer_singleproducer_query.c">\r
+ <Filter>Source Files\lfds700_queue_bounded_singleconsumer_singleproducer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_cleanup.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_get.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_init.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_insert.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_list_addonly_singlylinked_unordered\lfds700_list_addonly_singlylinked_unordered_query.c">\r
+ <Filter>Source Files\lfds700_list_addonly_singlylinked_unordered</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_cleanup.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_init.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_query.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_read.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\lfds700_ringbuffer\lfds700_ringbuffer_write.c">\r
+ <Filter>Source Files\lfds700_ringbuffer</Filter>\r
+ </ClCompile>\r
+ </ItemGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <PropertyGroup />\r
+</Project>
\ No newline at end of file
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "liblfds700_internal.h"
+
+
+
+
+
+/****************************************************************************/
+DRIVER_INITIALIZE DriverEntry;
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
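+// TRD : a minimal stub; as the kernel build readme explains, the linker requires
+//       a DriverEntry function to exist when building the dynamic library, even
+//       though it is not otherwise used - warning 4100 (unreferenced formal
+//       parameter) is disabled around it because the parameters are deliberately
+//       unused
+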
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return( STATUS_SUCCESS );
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+EXPORTS
+
+lfds700_btree_au_init_valid_on_current_logical_core = lfds700_btree_au_init_valid_on_current_logical_core
+lfds700_btree_au_cleanup = lfds700_btree_au_cleanup
+lfds700_btree_au_insert = lfds700_btree_au_insert
+lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds700_btree_au_get_by_absolute_position = lfds700_btree_au_get_by_absolute_position
+lfds700_btree_au_get_by_relative_position = lfds700_btree_au_get_by_relative_position
+lfds700_btree_au_get_by_key = lfds700_btree_au_get_by_key
+lfds700_btree_au_query = lfds700_btree_au_query
+
+lfds700_freelist_init_valid_on_current_logical_core = lfds700_freelist_init_valid_on_current_logical_core
+lfds700_freelist_cleanup = lfds700_freelist_cleanup
+lfds700_freelist_push = lfds700_freelist_push
+lfds700_freelist_pop = lfds700_freelist_pop
+lfds700_freelist_query = lfds700_freelist_query
+
+lfds700_hash_a_init_valid_on_current_logical_core = lfds700_hash_a_init_valid_on_current_logical_core
+lfds700_hash_a_cleanup = lfds700_hash_a_cleanup
+lfds700_hash_a_insert = lfds700_hash_a_insert
+lfds700_hash_a_get_by_key = lfds700_hash_a_get_by_key
+lfds700_hash_a_iterate_init = lfds700_hash_a_iterate_init
+lfds700_hash_a_iterate = lfds700_hash_a_iterate
+lfds700_hash_a_query = lfds700_hash_a_query
+
+lfds700_list_aos_init_valid_on_current_logical_core = lfds700_list_aos_init_valid_on_current_logical_core
+lfds700_list_aos_cleanup = lfds700_list_aos_cleanup
+lfds700_list_aos_insert = lfds700_list_aos_insert
+lfds700_list_aos_get_by_key = lfds700_list_aos_get_by_key
+lfds700_list_aos_query = lfds700_list_aos_query
+
+lfds700_list_asu_init_valid_on_current_logical_core = lfds700_list_asu_init_valid_on_current_logical_core
+lfds700_list_asu_cleanup = lfds700_list_asu_cleanup
+lfds700_list_asu_insert_at_position = lfds700_list_asu_insert_at_position
+lfds700_list_asu_insert_at_start = lfds700_list_asu_insert_at_start
+lfds700_list_asu_insert_at_end = lfds700_list_asu_insert_at_end
+lfds700_list_asu_insert_after_element = lfds700_list_asu_insert_after_element
+lfds700_list_asu_get_by_key = lfds700_list_asu_get_by_key
+lfds700_list_asu_query = lfds700_list_asu_query
+
+lfds700_misc_library_init_valid_on_current_logical_core = lfds700_misc_library_init_valid_on_current_logical_core
+lfds700_misc_library_cleanup = lfds700_misc_library_cleanup
+lfds700_misc_prng_init = lfds700_misc_prng_init
+lfds700_misc_query = lfds700_misc_query
+
+lfds700_queue_init_valid_on_current_logical_core = lfds700_queue_init_valid_on_current_logical_core
+lfds700_queue_cleanup = lfds700_queue_cleanup
+lfds700_queue_enqueue = lfds700_queue_enqueue
+lfds700_queue_dequeue = lfds700_queue_dequeue
+lfds700_queue_query = lfds700_queue_query
+
+lfds700_queue_bss_init_valid_on_current_logical_core = lfds700_queue_bss_init_valid_on_current_logical_core
+lfds700_queue_bss_cleanup = lfds700_queue_bss_cleanup
+lfds700_queue_bss_enqueue = lfds700_queue_bss_enqueue
+lfds700_queue_bss_dequeue = lfds700_queue_bss_dequeue
+lfds700_queue_bss_query = lfds700_queue_bss_query
+
+lfds700_ringbuffer_init_valid_on_current_logical_core = lfds700_ringbuffer_init_valid_on_current_logical_core
+lfds700_ringbuffer_cleanup = lfds700_ringbuffer_cleanup
+lfds700_ringbuffer_read = lfds700_ringbuffer_read
+lfds700_ringbuffer_write = lfds700_ringbuffer_write
+lfds700_ringbuffer_query = lfds700_ringbuffer_query
+
+lfds700_stack_init_valid_on_current_logical_core = lfds700_stack_init_valid_on_current_logical_core
+lfds700_stack_cleanup = lfds700_stack_cleanup
+lfds700_stack_push = lfds700_stack_push
+lfds700_stack_pop = lfds700_stack_pop
+lfds700_stack_query = lfds700_stack_query
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!).
+
+To deal with these limitations, it is necessary for a Windows kernel
+build to run a batch file prior to building.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+Both batch files are idempotent; you can run them as often as you like
+and switch between them as often as you want. Whichever you run, it will
+take you from whatever state you were previously in to the state you
+want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file then copies "/sources.static" into
+"/src/single_dir_for_windows_kernel/" as the file "sources", which
+causes a static library to be built.
+
+The dynamic library batch file instead copies "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/" as the file "sources", which
+causes a dynamic library to be built. It also copies "src/driver_entry.c"
+into "/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even though
+it is not used.
+
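+As a rough, illustrative sketch of the intended workflow (the batch
+file here stands for whichever of the two you want to run):
+
+  1. open a WDK build environment command prompt in this directory
+  2. run the batch file for the library type you want
+  3. run the WDK "build" command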
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\lfds700_btree_addonly_unbalanced\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_freelist\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_hash_addonly\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_list_addonly_ordered_singlylinked\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_list_addonly_singlylinked_unordered\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_queue\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_ringbuffer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_stack\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\liblfds700_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y driver_entry_renamed_to_avoid_compiler_warning.c single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+copy /y sources.dynamic single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\lfds700_btree_addonly_unbalanced\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_freelist\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_hash_addonly\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_list_addonly_ordered_singlylinked\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_list_addonly_singlylinked_unordered\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_queue\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_queue_bounded_singleconsumer_singleproducer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_ringbuffer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds700_stack\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\liblfds700_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y sources.static single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+DLLDEF = ../liblfds700.def
+TARGETNAME = liblfds700
+TARGETPATH = ../../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /D_KERNEL_MODE
+
+INCLUDES = ../../../inc/
+SOURCES = lfds700_hash_addonly_cleanup.c \
+ lfds700_hash_addonly_get.c \
+ lfds700_hash_addonly_init.c \
+ lfds700_hash_addonly_insert.c \
+ lfds700_hash_addonly_iterate.c \
+ lfds700_hash_addonly_query.c \
+ lfds700_list_addonly_ordered_singlylinked_cleanup.c \
+ lfds700_list_addonly_ordered_singlylinked_get.c \
+ lfds700_list_addonly_ordered_singlylinked_init.c \
+ lfds700_list_addonly_ordered_singlylinked_insert.c \
+ lfds700_list_addonly_ordered_singlylinked_query.c \
+ lfds700_list_addonly_singlylinked_unordered_cleanup.c \
+ lfds700_list_addonly_singlylinked_unordered_get.c \
+ lfds700_list_addonly_singlylinked_unordered_init.c \
+ lfds700_list_addonly_singlylinked_unordered_insert.c \
+ lfds700_list_addonly_singlylinked_unordered_query.c \
+ lfds700_btree_addonly_unbalanced_cleanup.c \
+ lfds700_btree_addonly_unbalanced_get.c \
+ lfds700_btree_addonly_unbalanced_init.c \
+ lfds700_btree_addonly_unbalanced_insert.c \
+ lfds700_btree_addonly_unbalanced_query.c \
+ lfds700_freelist_cleanup.c \
+ lfds700_freelist_init.c \
+ lfds700_freelist_pop.c \
+ lfds700_freelist_push.c \
+ lfds700_freelist_query.c \
+ lfds700_misc_cleanup.c \
+ lfds700_misc_globals.c \
+ lfds700_misc_init.c \
+ lfds700_misc_prng.c \
+ lfds700_misc_query.c \
+ lfds700_queue_cleanup.c \
+ lfds700_queue_dequeue.c \
+ lfds700_queue_enqueue.c \
+ lfds700_queue_init.c \
+ lfds700_queue_query.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_init.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_query.c \
+ lfds700_ringbuffer_cleanup.c \
+ lfds700_ringbuffer_init.c \
+ lfds700_ringbuffer_query.c \
+ lfds700_ringbuffer_read.c \
+ lfds700_ringbuffer_write.c \
+ lfds700_stack_cleanup.c \
+ lfds700_stack_init.c \
+ lfds700_stack_pop.c \
+ lfds700_stack_push.c \
+ lfds700_stack_query.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+TARGETNAME = liblfds700
+TARGETPATH = ../../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /D_KERNEL_MODE
+
+INCLUDES = ../../../inc/
+SOURCES = lfds700_hash_addonly_cleanup.c \
+ lfds700_hash_addonly_get.c \
+ lfds700_hash_addonly_init.c \
+ lfds700_hash_addonly_insert.c \
+ lfds700_hash_addonly_iterate.c \
+ lfds700_hash_addonly_query.c \
+ lfds700_list_addonly_ordered_singlylinked_cleanup.c \
+ lfds700_list_addonly_ordered_singlylinked_get.c \
+ lfds700_list_addonly_ordered_singlylinked_init.c \
+ lfds700_list_addonly_ordered_singlylinked_insert.c \
+ lfds700_list_addonly_ordered_singlylinked_query.c \
+ lfds700_list_addonly_singlylinked_unordered_cleanup.c \
+ lfds700_list_addonly_singlylinked_unordered_get.c \
+ lfds700_list_addonly_singlylinked_unordered_init.c \
+ lfds700_list_addonly_singlylinked_unordered_insert.c \
+ lfds700_list_addonly_singlylinked_unordered_query.c \
+ lfds700_btree_addonly_unbalanced_cleanup.c \
+ lfds700_btree_addonly_unbalanced_get.c \
+ lfds700_btree_addonly_unbalanced_init.c \
+ lfds700_btree_addonly_unbalanced_insert.c \
+ lfds700_btree_addonly_unbalanced_query.c \
+ lfds700_freelist_cleanup.c \
+ lfds700_freelist_init.c \
+ lfds700_freelist_pop.c \
+ lfds700_freelist_push.c \
+ lfds700_freelist_query.c \
+ lfds700_misc_cleanup.c \
+ lfds700_misc_globals.c \
+ lfds700_misc_init.c \
+ lfds700_misc_prng.c \
+ lfds700_misc_query.c \
+ lfds700_queue_cleanup.c \
+ lfds700_queue_dequeue.c \
+ lfds700_queue_enqueue.c \
+ lfds700_queue_init.c \
+ lfds700_queue_query.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_cleanup.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_dequeue.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_enqueue.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_init.c \
+ lfds700_queue_bounded_singleconsumer_singleproducer_query.c \
+ lfds700_ringbuffer_cleanup.c \
+ lfds700_ringbuffer_init.c \
+ lfds700_ringbuffer_query.c \
+ lfds700_ringbuffer_read.c \
+ lfds700_ringbuffer_write.c \
+ lfds700_stack_cleanup.c \
+ lfds700_stack_init.c \
+ lfds700_stack_pop.c \
+ lfds700_stack_push.c \
+ lfds700_stack_query.c
+
--- /dev/null
+#ifndef LIBLFDS700_H
+
+ /***** defines *****/
+ #define LIBLFDS700_H
+
+ /***** pragmas on *****/
+ #pragma warning( disable : 4324 ) // TRD : 4324 disables MSVC warnings for structure alignment padding due to alignment specifiers
+
+ #pragma prefast( disable : 28113 28182 28183, "blah" )
+
+ /***** includes *****/
+ #include "liblfds700/lfds700_porting_abstraction_layer_compiler.h"
+ #include "liblfds700/lfds700_porting_abstraction_layer_operating_system.h"
+ #include "liblfds700/lfds700_porting_abstraction_layer_processor.h"
+
+ #include "liblfds700/lfds700_misc.h" // TRD : everything after depends on misc
+ #include "liblfds700/lfds700_btree_addonly_unbalanced.h" // TRD : hash_addonly depends on btree_addonly_unbalanced
+ #include "liblfds700/lfds700_freelist.h"
+ #include "liblfds700/lfds700_hash_addonly.h"
+ #include "liblfds700/lfds700_list_addonly_ordered_singlylinked.h"
+ #include "liblfds700/lfds700_list_addonly_singlylinked_unordered.h"
+ #include "liblfds700/lfds700_queue.h"
+ #include "liblfds700/lfds700_queue_bounded_singleconsumer_singleproducer.h"
+ #include "liblfds700/lfds700_ringbuffer.h"
+ #include "liblfds700/lfds700_stack.h"
+
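+  /* TRD : a hedged usage note (an assumption drawn from the exported function
+           names in liblfds700.def, not verified against the docs in this tree) -
+           the usual pattern is to call
+           lfds700_misc_library_init_valid_on_current_logical_core() once before
+           using any of the data structures, and lfds700_misc_library_cleanup()
+           at shutdown
+  */
+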
+ /***** pragmas off *****/
+ #pragma warning( default : 4324 )
+
+#endif
+
--- /dev/null
+/***** defines *****/
+#define LFDS700_BTREE_AU_GET_KEY_FROM_ELEMENT( btree_au_element ) ( (btree_au_element).key )
+#define LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( btree_au_element ) ( LFDS700_MISC_BARRIER_LOAD, (btree_au_element).value )
+#define LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( btree_au_element, new_value ) { void *local_new_value = (void *) (lfds700_pal_uint_t) (new_value); LFDS700_PAL_ATOMIC_EXCHANGE( &(btree_au_element).value, &local_new_value ); }
+#define LFDS700_BTREE_AU_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum lfds700_btree_au_absolute_position
+{
+ LFDS700_BTREE_AU_ABSOLUTE_POSITION_ROOT,
+ LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LFDS700_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum lfds700_btree_au_existing_key
+{
+ LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE,
+ LFDS700_BTREE_AU_EXISTING_KEY_FAIL
+};
+
+enum lfds700_btree_au_insert_result
+{
+ LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS
+};
+
+enum lfds700_btree_au_relative_position
+{
+ LFDS700_BTREE_AU_RELATIVE_POSITION_UP,
+ LFDS700_BTREE_AU_RELATIVE_POSITION_LEFT,
+ LFDS700_BTREE_AU_RELATIVE_POSITION_RIGHT,
+ LFDS700_BTREE_AU_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LFDS700_BTREE_AU_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+enum lfds700_btree_au_query
+{
+ LFDS700_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS700_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structs *****/
+struct lfds700_btree_au_element
+{
+ struct lfds700_btree_au_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile left,
+ *volatile right,
+ *volatile up;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile value;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *key;
+};
+
+struct lfds700_btree_au_state
+{
+ struct lfds700_btree_au_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile root;
+
+ int LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum lfds700_btree_au_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_btree_au_init_valid_on_current_logical_core( struct lfds700_btree_au_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds700_btree_au_existing_key existing_key,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_btree_au_cleanup( struct lfds700_btree_au_state *baus,
+ void (*element_cleanup_callback)(struct lfds700_btree_au_state *baus, struct lfds700_btree_au_element *baue) );
+
+enum lfds700_btree_au_insert_result lfds700_btree_au_insert( struct lfds700_btree_au_state *baus,
+ struct lfds700_btree_au_element *baue,
+ struct lfds700_btree_au_element **existing_baue,
+ struct lfds700_misc_prng_state *ps );
+ // TRD : if an insert collides with an existing key and existing_baue is non-NULL, existing_baue is set to the existing element
+
+int lfds700_btree_au_get_by_key( struct lfds700_btree_au_state *baus,
+ void *key,
+ struct lfds700_btree_au_element **baue );
+
+int lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position( struct lfds700_btree_au_state *baus,
+ struct lfds700_btree_au_element **baue,
+ enum lfds700_btree_au_absolute_position absolute_position,
+ enum lfds700_btree_au_relative_position relative_position );
+ // TRD : if *baue is NULL, we get the element at position, otherwise we move from *baue according to direction
+
+int lfds700_btree_au_get_by_absolute_position( struct lfds700_btree_au_state *baus,
+ struct lfds700_btree_au_element **baue,
+ enum lfds700_btree_au_absolute_position absolute_position );
+
+int lfds700_btree_au_get_by_relative_position( struct lfds700_btree_au_element **baue,
+ enum lfds700_btree_au_relative_position relative_position );
+
+void lfds700_btree_au_query( struct lfds700_btree_au_state *baus,
+ enum lfds700_btree_au_query query_type,
+ void *query_input,
+ void *query_output );
+
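+/* TRD : a hedged usage sketch (all user-side names below - key_compare,
+         some_pointer, the local variables - are illustrative, not part of
+         the library; ps is assumed to be an already-initialised
+         struct lfds700_misc_prng_state) :
+
+           int key_compare( void const *new_key, void const *existing_key )
+           {
+             lfds700_pal_uint_t nk = (lfds700_pal_uint_t) new_key,
+                                ek = (lfds700_pal_uint_t) existing_key;
+             return( (nk > ek) - (nk < ek) );
+           }
+
+           struct lfds700_btree_au_state baus;
+           struct lfds700_btree_au_element baue;
+
+           lfds700_btree_au_init_valid_on_current_logical_core( &baus, key_compare, LFDS700_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+           LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( baue, 5 );
+           LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( baue, some_pointer );
+           lfds700_btree_au_insert( &baus, &baue, NULL, &ps );
+*/
+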
--- /dev/null
+/***** defines *****/
+#define LFDS700_FREELIST_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LFDS700_FREELIST_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds700_pal_uint_t) (new_value) )
+#define LFDS700_FREELIST_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** enums *****/
+enum lfds700_freelist_query
+{
+ LFDS700_FREELIST_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_freelist_element
+{
+ struct lfds700_freelist_element
+ *volatile next;
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds700_freelist_state
+{
+ struct lfds700_freelist_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile top[PAC_SIZE];
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_freelist_init_valid_on_current_logical_core( struct lfds700_freelist_state *fs, void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_freelist_cleanup( struct lfds700_freelist_state *fs,
+ void (*element_cleanup_callback)(struct lfds700_freelist_state *fs, struct lfds700_freelist_element *fe) );
+
+void lfds700_freelist_push( struct lfds700_freelist_state *fs,
+ struct lfds700_freelist_element *fe,
+ struct lfds700_misc_prng_state *ps );
+
+int lfds700_freelist_pop( struct lfds700_freelist_state *fs,
+ struct lfds700_freelist_element **fe,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_freelist_query( struct lfds700_freelist_state *fs,
+ enum lfds700_freelist_query query_type,
+ void *query_input,
+ void *query_output );
+
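+/* TRD : a hedged usage sketch (user-side names are illustrative only; ps is
+         assumed to be an already-initialised struct lfds700_misc_prng_state,
+         and pop is assumed to return non-zero when an element was popped) :
+
+           struct lfds700_freelist_state fs;
+           struct lfds700_freelist_element fe, *popped_fe;
+
+           lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+           LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( fe, some_pointer );
+           lfds700_freelist_push( &fs, &fe, &ps );
+
+           if( lfds700_freelist_pop(&fs, &popped_fe, &ps) )
+             some_pointer = LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *popped_fe );
+*/
+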
--- /dev/null
+/***** defines *****/
+#define LFDS700_HASH_A_GET_KEY_FROM_ELEMENT( hash_a_element ) ( (hash_a_element).key )
+#define LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hash_a_element, new_key ) ( (hash_a_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_HASH_A_GET_VALUE_FROM_ELEMENT( hash_a_element ) ( LFDS700_MISC_BARRIER_LOAD, (hash_a_element).value )
+#define LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hash_a_element, new_value ) { void *local_new_value = (void *) (lfds700_pal_uint_t) (new_value); LFDS700_PAL_ATOMIC_EXCHANGE( &(hash_a_element).value, &local_new_value ); }
+#define LFDS700_HASH_A_GET_USER_STATE_FROM_STATE( hash_a_state ) ( (hash_a_state).user_state )
+
+#define LFDS700_HASH_A_32BIT_HASH_FUNCTION( data, data_length_in_bytes, hash ) { \
+ lfds700_pal_uint_t \
+ loop; \
+ \
+ for( loop = 0 ; loop < (data_length_in_bytes) ; loop++ ) \
+ { \
+ (hash) += *( (char unsigned *) (data) + loop ); \
+ (hash) += ((hash) << 10); \
+ (hash) ^= ((hash) >> 6); \
+ } \
+ \
+ (hash) += ((hash) << 3); \
+ (hash) ^= ((hash) >> 11); \
+ (hash) += ((hash) << 15); \
+ }
+ /* TRD : this is the Jenkins one-at-a-time hash
+ it produces a 32 bit hash
+ http://en.wikipedia.org/wiki/Jenkins_hash_function
+
+ we ourselves do *not* initialize the value of *hash, so that
+ our caller has the option to call us multiple times, each
+ time with for example a different member of a struct, which is
+ then hashed into the existing, built-up-so-far hash value, and
+ so build up a quality hash
+ */
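+
+ /* TRD : a hedged usage sketch (the struct and variable names are illustrative) -
+          the caller zeroes the hash before the first call, since the macro
+          deliberately does not, then hashes each struct member in turn :
+
+            struct user_key { int id; int group; } k;
+            lfds700_pal_uint_t hash = 0;
+
+            LFDS700_HASH_A_32BIT_HASH_FUNCTION( &k.id, sizeof(k.id), hash );
+            LFDS700_HASH_A_32BIT_HASH_FUNCTION( &k.group, sizeof(k.group), hash );
+ */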
+
+/***** enums *****/
+enum lfds700_hash_a_existing_key
+{
+ LFDS700_HASH_A_EXISTING_KEY_OVERWRITE,
+ LFDS700_HASH_A_EXISTING_KEY_FAIL
+};
+
+enum lfds700_hash_a_insert_result
+{
+ LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY,
+ LFDS700_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE,
+ LFDS700_HASH_A_PUT_RESULT_SUCCESS
+};
+
+enum lfds700_hash_a_query
+{
+ LFDS700_HASH_A_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS700_HASH_A_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structs *****/
+struct lfds700_hash_a_element
+{
+ struct lfds700_btree_au_element
+ baue;
+
+ void
+ *key;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile value;
+};
+
+struct lfds700_hash_a_iterate
+{
+ struct lfds700_btree_au_element
+ *baue;
+
+ struct lfds700_btree_au_state
+ *baus,
+ *baus_end;
+};
+
+struct lfds700_hash_a_state
+{
+ enum lfds700_hash_a_existing_key
+ existing_key;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ lfds700_pal_uint_t
+ array_size;
+
+ struct lfds700_btree_au_state
+ *baus_array;
+
+ void
+ (*element_cleanup_callback)( struct lfds700_hash_a_state *has, struct lfds700_hash_a_element *hae ),
+ (*key_hash_function)( void const *key, lfds700_pal_uint_t *hash ),
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_hash_a_init_valid_on_current_logical_core( struct lfds700_hash_a_state *has,
+ struct lfds700_btree_au_state *baus_array,
+ lfds700_pal_uint_t array_size,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void (*key_hash_function)(void const *key, lfds700_pal_uint_t *hash),
+ enum lfds700_hash_a_existing_key existing_key,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_hash_a_cleanup( struct lfds700_hash_a_state *has,
+ void (*element_cleanup_function)(struct lfds700_hash_a_state *has, struct lfds700_hash_a_element *hae) );
+
+enum lfds700_hash_a_insert_result lfds700_hash_a_insert( struct lfds700_hash_a_state *has,
+ struct lfds700_hash_a_element *hae,
+ struct lfds700_hash_a_element **existing_hae,
+ struct lfds700_misc_prng_state *ps );
+ // TRD : if existing_hae is not NULL and the key already exists, *existing_hae is set to point to the element which holds the existing key
+
+int lfds700_hash_a_get_by_key( struct lfds700_hash_a_state *has,
+ void *key,
+ struct lfds700_hash_a_element **hae );
+
+void lfds700_hash_a_iterate_init( struct lfds700_hash_a_state *has, struct lfds700_hash_a_iterate *hai );
+int lfds700_hash_a_iterate( struct lfds700_hash_a_iterate *hai, struct lfds700_hash_a_element **hae );
+
+void lfds700_hash_a_query( struct lfds700_hash_a_state *has,
+ enum lfds700_hash_a_query query_type,
+ void *query_input,
+ void *query_output );
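+
+ /* TRD : illustrative sketch, not part of the original header - setting the key and
+ value in an element with the macros above and then inserting it; "has", "hae" and
+ "ps" are assumed to point to an initialized hash state, a caller-allocated element
+ and an initialized PRNG state, and the key and value used are invented for
+ illustration
+
+ enum lfds700_hash_a_insert_result
+ ir;
+
+ struct lfds700_hash_a_element
+ *existing_hae;
+
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( *hae, 42 );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( *hae, 1 );
+
+ ir = lfds700_hash_a_insert( has, hae, &existing_hae, ps );
+
+ if( ir == LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY )
+ {
+ // the key was already present (and existing_key was set to FAIL at init);
+ // existing_hae now points at the element which holds that key
+ }
+ */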
+
--- /dev/null
+/***** defines *****/
+#define LFDS700_LIST_AOS_GET_START( list_aos_state ) ( LFDS700_MISC_BARRIER_LOAD, (list_aos_state).start->next )
+#define LFDS700_LIST_AOS_GET_NEXT( list_aos_element ) ( LFDS700_MISC_BARRIER_LOAD, (list_aos_element).next )
+#define LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT( list_aos_state, pointer_to_list_aos_element ) ( (pointer_to_list_aos_element) == NULL ? ( (pointer_to_list_aos_element) = LFDS700_LIST_AOS_GET_START(list_aos_state) ) : ( (pointer_to_list_aos_element) = LFDS700_LIST_AOS_GET_NEXT(*(pointer_to_list_aos_element)) ) )
+#define LFDS700_LIST_AOS_GET_KEY_FROM_ELEMENT( list_aos_element ) ( (list_aos_element).key )
+#define LFDS700_LIST_AOS_SET_KEY_IN_ELEMENT( list_aos_element, new_key ) ( (list_aos_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_LIST_AOS_GET_VALUE_FROM_ELEMENT( list_aos_element ) ( LFDS700_MISC_BARRIER_LOAD, (list_aos_element).value )
+#define LFDS700_LIST_AOS_SET_VALUE_IN_ELEMENT( list_aos_element, new_value ) { void *local_new_value = (void *) (lfds700_pal_uint_t) (new_value); LFDS700_PAL_ATOMIC_EXCHANGE( &(list_aos_element).value, &local_new_value ); }
+#define LFDS700_LIST_AOS_GET_USER_STATE_FROM_STATE( list_aos_state ) ( (list_aos_state).user_state )
+
+/***** enums *****/
+enum lfds700_list_aos_existing_key
+{
+ LFDS700_LIST_AOS_EXISTING_KEY_OVERWRITE,
+ LFDS700_LIST_AOS_EXISTING_KEY_FAIL
+};
+
+enum lfds700_list_aos_insert_result
+{
+ LFDS700_LIST_AOS_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LFDS700_LIST_AOS_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LFDS700_LIST_AOS_INSERT_RESULT_SUCCESS
+};
+
+enum lfds700_list_aos_query
+{
+ LFDS700_LIST_AOS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS700_LIST_AOS_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_list_aos_element
+{
+ struct lfds700_list_aos_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile next;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile value;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *key;
+};
+
+struct lfds700_list_aos_state
+{
+ struct lfds700_list_aos_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile start;
+
+ struct lfds700_list_aos_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ dummy_element;
+
+ int LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum lfds700_list_aos_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_list_aos_init_valid_on_current_logical_core( struct lfds700_list_aos_state *laoss,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds700_list_aos_existing_key existing_key,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_list_aos_cleanup( struct lfds700_list_aos_state *laoss,
+ void (*element_cleanup_callback)(struct lfds700_list_aos_state *laoss, struct lfds700_list_aos_element *laose) );
+
+enum lfds700_list_aos_insert_result lfds700_list_aos_insert( struct lfds700_list_aos_state *laoss,
+ struct lfds700_list_aos_element *laose,
+ struct lfds700_list_aos_element **existing_laose,
+ struct lfds700_misc_prng_state *ps );
+
+int lfds700_list_aos_get_by_key( struct lfds700_list_aos_state *laoss,
+ void *key,
+ struct lfds700_list_aos_element **laose );
+
+void lfds700_list_aos_query( struct lfds700_list_aos_state *laoss,
+ enum lfds700_list_aos_query query_type,
+ void *query_input,
+ void *query_output );
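+
+ /* TRD : illustrative sketch, not part of the original header - iterating the list
+ with LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT, which starts from the list head
+ when the element pointer is NULL and otherwise advances to the next element;
+ "laoss" is assumed to be a pointer to an initialized list state and the value
+ handling is invented for illustration
+
+ struct lfds700_list_aos_element
+ *laose = NULL;
+
+ void
+ *value;
+
+ while( LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT(*laoss, laose) )
+ {
+ value = LFDS700_LIST_AOS_GET_VALUE_FROM_ELEMENT( *laose );
+ // use value
+ }
+ */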
+
--- /dev/null
+/***** defines *****/
+#define LFDS700_LIST_ASU_GET_START( list_asu_state ) ( LFDS700_MISC_BARRIER_LOAD, (list_asu_state).start->next )
+#define LFDS700_LIST_ASU_GET_NEXT( list_asu_element ) ( LFDS700_MISC_BARRIER_LOAD, (list_asu_element).next )
+#define LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT( list_asu_state, pointer_to_list_asu_element ) ( (pointer_to_list_asu_element) == NULL ? ( (pointer_to_list_asu_element) = LFDS700_LIST_ASU_GET_START(list_asu_state) ) : ( (pointer_to_list_asu_element) = LFDS700_LIST_ASU_GET_NEXT(*(pointer_to_list_asu_element)) ) )
+#define LFDS700_LIST_ASU_GET_KEY_FROM_ELEMENT( list_asu_element ) ( (list_asu_element).key )
+#define LFDS700_LIST_ASU_SET_KEY_IN_ELEMENT( list_asu_element, new_key ) ( (list_asu_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( list_asu_element ) ( LFDS700_MISC_BARRIER_LOAD, (list_asu_element).value )
+#define LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( list_asu_element, new_value ) { void *local_new_value = (void *) (lfds700_pal_uint_t) (new_value); LFDS700_PAL_ATOMIC_EXCHANGE( &(list_asu_element).value, &local_new_value ); }
+#define LFDS700_LIST_ASU_GET_USER_STATE_FROM_STATE( list_asu_state ) ( (list_asu_state).user_state )
+
+/***** enums *****/
+enum lfds700_list_asu_position
+{
+ LFDS700_LIST_ASU_POSITION_START,
+ LFDS700_LIST_ASU_POSITION_END,
+ LFDS700_LIST_ASU_POSITION_AFTER
+};
+
+enum lfds700_list_asu_query
+{
+ LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS700_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_list_asu_element
+{
+ struct lfds700_list_asu_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile next;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile value;
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *key;
+};
+
+struct lfds700_list_asu_state
+{
+ struct lfds700_list_asu_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile end,
+ *volatile start;
+
+ struct lfds700_list_asu_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ dummy_element;
+
+ int LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_list_asu_init_valid_on_current_logical_core( struct lfds700_list_asu_state *lasus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_list_asu_cleanup( struct lfds700_list_asu_state *lasus,
+ void (*element_cleanup_callback)(struct lfds700_list_asu_state *lasus, struct lfds700_list_asu_element *lasue) );
+
+void lfds700_list_asu_insert_at_position( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_list_asu_element *lasue_predecessor,
+ enum lfds700_list_asu_position position,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_list_asu_insert_at_start( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_list_asu_insert_at_end( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_list_asu_insert_after_element( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_list_asu_element *lasue_predecessor,
+ struct lfds700_misc_prng_state *ps );
+
+int lfds700_list_asu_get_by_key( struct lfds700_list_asu_state *lasus,
+ void *key,
+ struct lfds700_list_asu_element **lasue );
+
+void lfds700_list_asu_query( struct lfds700_list_asu_state *lasus,
+ enum lfds700_list_asu_query query_type,
+ void *query_input,
+ void *query_output );
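+
+ /* TRD : illustrative sketch, not part of the original header - adding one element at
+ the end of the list and a second element directly after it; "lasus" and "ps" are
+ assumed to point to an initialized list state and PRNG state, the two elements
+ and their values are invented for illustration, and in real code the elements
+ would normally live inside caller-allocated structures
+
+ struct lfds700_list_asu_element
+ first,
+ second;
+
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( first, 1 );
+ lfds700_list_asu_insert_at_end( lasus, &first, ps );
+
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( second, 2 );
+ lfds700_list_asu_insert_after_element( lasus, &second, &first, ps );
+ */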
+
--- /dev/null
+/***** defines *****/
+#define LFDS700_MISC_VERSION_STRING "7.0.0"
+#define LFDS700_MISC_VERSION_INTEGER 700
+
+#ifndef NULL
+ #define NULL ( (void *) 0 )
+#endif
+
+#define POINTER 0
+#define COUNTER 1
+#define PAC_SIZE 2
+
+#define LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE 0
+#define LFDS700_MISC_PRNG_MAX ( (lfds700_pal_uint_t) -1 )
+#define LFDS700_MISC_DELIBERATELY_CRASH { char *c = 0; *c = 0; }
+#define LFDS700_MISC_PRNG_SEED 0x0a34655d34c092feULL
+ /* TRD : from an on-line hardware RNG, using atmospheric noise
+ the URL below will generate another 16 random hex digits (i.e. a 64-bit number) and is
+ the RNG used to generate the number above (0x0a34655d34c092fe)
+ http://www.random.org/integers/?num=16&min=0&max=15&col=1&base=16&format=plain&rnd=new
+
+ this seed is a fixed seed which is used for the slow, high quality PRNG,
+ which in turn is used when a thread starts to generate a single high quality seed
+ for the fast, low quality PRNG used for the CAS exponential backoff
+ */
+
+#if( LFDS700_PAL_ALIGN_SINGLE_POINTER == 4 ) // TRD : any 32-bit platform
+ // TRD : PRNG is a 32-bit xorshift, numbers suggested by George Marsaglia, in his paper http://www.jstatsoft.org/v08/i14/paper
+ #define LFDS700_MISC_PRNG_GENERATE( pointer_to_lfds700_misc_prng_state ) ( (pointer_to_lfds700_misc_prng_state)->prng_state ^= (pointer_to_lfds700_misc_prng_state)->prng_state >> 13, (pointer_to_lfds700_misc_prng_state)->prng_state ^= (pointer_to_lfds700_misc_prng_state)->prng_state << 17, (pointer_to_lfds700_misc_prng_state)->prng_state ^= (pointer_to_lfds700_misc_prng_state)->prng_state >> 5 )
+#endif
+
+#if( LFDS700_PAL_ALIGN_SINGLE_POINTER == 8 ) // TRD : any 64-bit platform
+ // TRD : PRNG is 64-bit xorshift (xorshift64*), from Sebastiano Vigna (vigna at acm dot org), http://creativecommons.org/publicdomain/zero/1.0/
+ #define LFDS700_MISC_PRNG_GENERATE( pointer_to_lfds700_misc_prng_state ) ( (pointer_to_lfds700_misc_prng_state)->prng_state ^= (pointer_to_lfds700_misc_prng_state)->prng_state >> 12, (pointer_to_lfds700_misc_prng_state)->prng_state ^= (pointer_to_lfds700_misc_prng_state)->prng_state << 25, (pointer_to_lfds700_misc_prng_state)->prng_state ^= (pointer_to_lfds700_misc_prng_state)->prng_state >> 27, (pointer_to_lfds700_misc_prng_state)->prng_state *= 2685821657736338717LL )
+#endif
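+
+ /* TRD : illustrative sketch, not part of the original header - per-thread use of the
+ fast PRNG; lfds700_misc_prng_init (prototyped further down this header) seeds the
+ state, after which each LFDS700_MISC_PRNG_GENERATE call both advances the state
+ and yields a new pseudo-random value, as used for the CAS exponential backoff
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ lfds700_pal_uint_t
+ random_value;
+
+ lfds700_misc_prng_init( &ps );
+
+ random_value = LFDS700_MISC_PRNG_GENERATE( &ps );
+ */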
+
+#if( !defined LFDS700_PAL_ATOMIC_CAS )
+ #define LFDS700_PAL_NO_ATOMIC_CAS
+
+ // TRD : lfds700_pal_atom_t volatile *destination, lfds700_pal_atom_t *compare, lfds700_pal_atom_t new_destination, enum lfds700_misc_cas_strength cas_strength, char unsigned result
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ LFDS700_PAL_ASSERT( !"LFDS700_PAL_ATOMIC_CAS not implemented for this platform." ); \
+ LFDS700_MISC_DELIBERATELY_CRASH; \
+ (result) = (char unsigned) 1; \
+ }
+#endif
+
+#if( !defined LFDS700_PAL_ATOMIC_DWCAS )
+ #define LFDS700_PAL_NO_ATOMIC_DWCAS
+
+ // TRD : lfds700_pal_atom_t volatile (*destination)[2], lfds700_pal_atom_t (*compare)[2], lfds700_pal_atom_t (*new_destination)[2], enum lfds700_misc_cas_strength cas_strength, unsigned char result
+
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ LFDS700_PAL_ASSERT( !"LFDS700_PAL_ATOMIC_DWCAS not implemented for this platform." ); \
+ LFDS700_MISC_DELIBERATELY_CRASH; \
+ (result) = (char unsigned) 1; \
+ }
+#endif
+
+#if( !defined LFDS700_PAL_ATOMIC_EXCHANGE )
+ #define LFDS700_PAL_NO_ATOMIC_EXCHANGE
+ // TRD : lfds700_pal_atom_t volatile *destination, lfds700_pal_atom_t *exchange
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ LFDS700_PAL_ASSERT( !"LFDS700_PAL_ATOMIC_EXCHANGE not implemented for this platform." ); \
+ LFDS700_MISC_DELIBERATELY_CRASH; \
+ }
+#endif
+
+#if( defined LFDS700_PAL_NO_COMPILER_BARRIERS )
+ #define LFDS700_MISC_BARRIER_LOAD ( LFDS700_PAL_BARRIER_PROCESSOR_LOAD )
+ #define LFDS700_MISC_BARRIER_STORE ( LFDS700_PAL_BARRIER_PROCESSOR_STORE )
+ #define LFDS700_MISC_BARRIER_FULL ( LFDS700_PAL_BARRIER_PROCESSOR_FULL )
+#else
+ #define LFDS700_MISC_BARRIER_LOAD ( LFDS700_PAL_BARRIER_COMPILER_LOAD, LFDS700_PAL_BARRIER_PROCESSOR_LOAD, LFDS700_PAL_BARRIER_COMPILER_LOAD )
+ #define LFDS700_MISC_BARRIER_STORE ( LFDS700_PAL_BARRIER_COMPILER_STORE, LFDS700_PAL_BARRIER_PROCESSOR_STORE, LFDS700_PAL_BARRIER_COMPILER_STORE )
+ #define LFDS700_MISC_BARRIER_FULL ( LFDS700_PAL_BARRIER_COMPILER_FULL, LFDS700_PAL_BARRIER_PROCESSOR_FULL, LFDS700_PAL_BARRIER_COMPILER_FULL )
+#endif
+
+#define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE LFDS700_MISC_BARRIER_LOAD
+
+#if( defined LFDS700_PAL_NO_ATOMIC_CAS )
+ #define LFDS700_MISC_ATOMIC_SUPPORT_CAS 0
+#else
+ #define LFDS700_MISC_ATOMIC_SUPPORT_CAS 1
+#endif
+
+#if( defined LFDS700_PAL_NO_ATOMIC_DWCAS )
+ #define LFDS700_MISC_ATOMIC_SUPPORT_DWCAS 0
+#else
+ #define LFDS700_MISC_ATOMIC_SUPPORT_DWCAS 1
+#endif
+
+#if( defined LFDS700_PAL_NO_ATOMIC_EXCHANGE )
+ #define LFDS700_MISC_ATOMIC_SUPPORT_EXCHANGE 0
+#else
+ #define LFDS700_MISC_ATOMIC_SUPPORT_EXCHANGE 1
+#endif
+
+/***** enums *****/
+enum lfds700_misc_cas_strength
+{
+ // TRD : yes, weak is 1 (one) - blame GCC!
+ LFDS700_MISC_CAS_STRENGTH_WEAK = 1,
+ LFDS700_MISC_CAS_STRENGTH_STRONG = 0
+};
+
+enum lfds700_misc_validity
+{
+ LFDS700_MISC_VALIDITY_VALID,
+ LFDS700_MISC_VALIDITY_INVALID_LOOP,
+ LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS,
+ LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS,
+ LFDS700_MISC_VALIDITY_INVALID_TEST_DATA,
+ LFDS700_MISC_VALIDITY_INVALID_ORDER
+};
+
+enum lfds700_misc_flag
+{
+ LFDS700_MISC_FLAG_LOWERED,
+ LFDS700_MISC_FLAG_RAISED
+};
+
+enum lfds700_misc_query
+{
+ LFDS700_MISC_QUERY_GET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_CAS,
+ LFDS700_MISC_QUERY_SET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_CAS,
+ LFDS700_MISC_QUERY_GET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_DWCAS,
+ LFDS700_MISC_QUERY_SET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_DWCAS,
+ LFDS700_MISC_QUERY_GET_BUILD_AND_VERSION_STRING
+};
+
+/***** structures *****/
+struct lfds700_misc_globals
+{
+ lfds700_pal_atom_t
+ exponential_backoff_timeslot_length_in_loop_iterations_for_cas,
+ exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas;
+};
+
+struct lfds700_misc_prng_state
+{
+ lfds700_pal_uint_t
+ prng_state;
+
+ // TRD : here to be on the same cache-line as prng_state, and so all are obtained from one cache-line read
+ lfds700_pal_atom_t
+ local_copy_of_global_exponential_backoff_timeslot_length_in_loop_iterations_for_cas,
+ local_copy_of_global_exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas;
+};
+
+struct lfds700_misc_validation_info
+{
+ lfds700_pal_uint_t
+ min_elements,
+ max_elements;
+};
+
+/***** externs *****/
+extern struct lfds700_misc_globals
+ lfds700_misc_globals;
+
+/***** public prototypes *****/
+void lfds700_misc_library_init_valid_on_current_logical_core( void );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+void lfds700_misc_library_cleanup( void );
+
+static LFDS700_PAL_INLINE void lfds700_misc_force_store( void );
+
+void lfds700_misc_prng_init( struct lfds700_misc_prng_state *ps );
+
+void lfds700_misc_query( enum lfds700_misc_query query_type, void *query_input, void *query_output );
+
+/***** public in-line functions *****/
+#pragma prefast( disable : 28112, "blah" )
+
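+ /* TRD : exchanges a zero into a local, suitably aligned variable; on platforms where
+ this function is used as the processor barrier (for example 32-bit x86 under MSVC,
+ see the processor porting layer), the locked instruction behind the atomic exchange
+ acts as a full memory barrier
+ */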
+static LFDS700_PAL_INLINE void lfds700_misc_force_store( void )
+{
+ lfds700_pal_uint_t
+ exchange = 0;
+
+ lfds700_pal_atom_t volatile LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ destination;
+
+ LFDS700_PAL_ATOMIC_EXCHANGE( &destination, &exchange );
+
+ return;
+}
+
--- /dev/null
+/****************************************************************************/
+#if( defined __GNUC__ )
+ // TRD : makes checking GCC versions much tidier
+ #define LFDS700_PAL_GCC_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ )
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 )
+
+ /* TRD : MSVC 8.0 and greater
+
+ _MSC_VER indicates Microsoft C compiler and version
+ - __declspec(align) requires 7.1 (1310)
+ - __nop requires 8.0 (1400)
+ - _ReadBarrier requires 8.0 (1400)
+ - _WriteBarrier requires 8.0 (1400)
+ - _ReadWriteBarrier requires 7.1 (1310)
+ - _InterlockedCompareExchangePointer requires 8.0 (1400)
+ - _InterlockedExchange requires 7.1 (1310)
+ - _InterlockedExchangePointer requires 8.0 (1400)
+ - _InterlockedCompareExchange64 requires 8.0 (1400) (seems to, docs unclear)
+ - _InterlockedCompareExchange128 requires 9.0 (1500)
+
+ load/store barriers are mandatory for liblfds, which means the earliest viable version of MSVC is 1400
+ strictly we could get away with 1310 and use _ReadWriteBarrier, but the difference between 1310 and 1400 is small, so WTH
+
+ _InterlockedCompareExchange128 is needed on 64-bit platforms to provide DWCAS, but DWCAS is not mandatory,
+ so we check against the compiler version - remember, any unimplemented atomic will be masked by its dummy define,
+ so everything will compile - it just means you can't use data structures which require that atomic
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_COMPILER
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_compiler.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_COMPILER
+
+ #define LFDS700_PAL_COMPILER_STRING "MSVC"
+
+ #define LFDS700_PAL_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS700_PAL_INLINE __forceinline
+
+ #define LFDS700_PAL_BARRIER_COMPILER_LOAD _ReadBarrier()
+ #define LFDS700_PAL_BARRIER_COMPILER_STORE _WriteBarrier()
+ #define LFDS700_PAL_BARRIER_COMPILER_FULL _ReadWriteBarrier()
+
+ /* TRD : there are four processors to consider;
+
+ . ARM32 (32 bit, CAS, DWCAS) (defined _M_ARM)
+ . Itanium (64 bit, CAS) (defined _M_IA64)
+ . x64 (64 bit, CAS, DWCAS) (defined _M_X64 || defined _M_AMD64)
+ . x86 (32 bit, CAS, DWCAS) (defined _M_IX86)
+
+ can't find any indications of 64-bit ARM support yet
+
+ ARM has better intrinsics than the others, as there are no-fence variants
+
+ in theory we also have to deal with 32-bit Windows on a 64-bit platform,
+ and I presume we'd see the compiler properly indicate this in its macros,
+ but this would require that we use 32-bit atomics on the 64-bit platforms,
+ while keeping 64-bit cache line lengths and so on, and this is just so
+ weird a thing to do these days that it's not supported
+
+ note that _InterlockedCompareExchangePointer performs CAS on all processors
+ however, it is documented as being available for x86 when in fact it is not
+ so we have to #if for processor type and use the length specific intrinsics
+ */
+
+ #if( defined _M_ARM )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_LOAD __dmb( _ARM_BARRIER_ISH )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_STORE __dmb( _ARM_BARRIER_ISHST )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_FULL __dmb( _ARM_BARRIER_ISH )
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds700_pal_atom_t \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = (lfds700_pal_atom_t) *(pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_compare) = (lfds700_pal_atom_t) _InterlockedCompareExchange_nf( (long volatile *) (pointer_to_destination), (long) (new_destination), (long) *(pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds700_pal_atom_t) *(pointer_to_compare) ); \
+ }
+
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ __int64 \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_new_destination) != NULL ); */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = *(__int64 *) (pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(__int64 *) (pointer_to_compare) = _InterlockedCompareExchange64_nf( (__int64 volatile *) (pointer_to_destination), *(__int64 *) (pointer_to_new_destination), *(__int64 *) (pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( *(__int64 *) (pointer_to_compare) == original_compare ); \
+ }
+
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_exchange) != NULL ); */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_exchange) = (lfds700_pal_atom_t) _InterlockedExchange_nf( (int long volatile *) (pointer_to_destination), (int long) *(pointer_to_exchange) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #if( defined _M_IA64 )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_LOAD __mf()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_STORE __mf()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_FULL __mf()
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds700_pal_atom_t \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = (lfds700_pal_atom_t) *(pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_compare) = (lfds700_pal_atom_t) _InterlockedCompareExchange64_acq( (__int64 volatile *) (pointer_to_destination), (__int64) (new_destination), (__int64) *(pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds700_pal_atom_t) *(pointer_to_compare) ); \
+ }
+
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_exchange) != NULL ); */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_exchange) = (lfds700_pal_atom_t) _InterlockedExchange64_acq( (__int64 volatile *) (pointer_to_destination), (__int64) *(pointer_to_exchange) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #if( defined _M_X64 || defined _M_AMD64 )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_LOAD _mm_lfence()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_STORE _mm_sfence()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_FULL _mm_mfence()
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds700_pal_atom_t \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = (lfds700_pal_atom_t) *(pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_compare) = (lfds700_pal_atom_t) _InterlockedCompareExchange64( (__int64 volatile *) (pointer_to_destination), (__int64) (new_destination), (__int64) *(pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds700_pal_atom_t) *(pointer_to_compare) ); \
+ }
+
+ #if( _MSC_VER >= 1500 )
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_new_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ result = (char unsigned) _InterlockedCompareExchange128( (__int64 volatile *) (pointer_to_destination), (__int64) (pointer_to_new_destination[1]), (__int64) (pointer_to_new_destination[0]), (__int64 *) (pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_exchange) != NULL ); */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_exchange) = (lfds700_pal_atom_t) _InterlockedExchangePointer( (void * volatile *) (pointer_to_destination), (void *) *(pointer_to_exchange) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #if( defined _M_IX86 )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_LOAD lfds700_misc_force_store()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_STORE lfds700_misc_force_store()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_FULL lfds700_misc_force_store()
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds700_pal_atom_t \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = (lfds700_pal_atom_t) *(pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_compare) = (lfds700_pal_atom_t) _InterlockedCompareExchange( (long volatile *) (pointer_to_destination), (long) (new_destination), (long) *(pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds700_pal_atom_t) *(pointer_to_compare) ); \
+ }
+
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ __int64 \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_new_destination) != NULL ); */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = *(__int64 *) (pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(__int64 *) (pointer_to_compare) = _InterlockedCompareExchange64( (__int64 volatile *) (pointer_to_destination), *(__int64 *) (pointer_to_new_destination), *(__int64 *) (pointer_to_compare) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( *(__int64 *) (pointer_to_compare) == original_compare ); \
+ }
+
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_exchange) != NULL ); */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds700_pal_atom_t *) (pointer_to_exchange) = (lfds700_pal_atom_t) _InterlockedExchange( (int long volatile *) (pointer_to_destination), (int long) *(pointer_to_exchange) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && LFDS700_PAL_GCC_VERSION >= 412 && LFDS700_PAL_GCC_VERSION < 473 )
+
+ /* TRD : GCC 4.1.2 up to (but not including) 4.7.3
+
+ __GNUC__ indicates GCC
+ LFDS700_PAL_GCC_VERSION indicates which version
+ - __sync_synchronize requires 4.1.2
+
+ GCC 4.1.2 introduced the __sync_*() atomic intrinsics
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_COMPILER
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_compiler.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_COMPILER
+
+ #define LFDS700_PAL_COMPILER_STRING "GCC < 4.7.3"
+
+ #define LFDS700_PAL_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS700_PAL_INLINE inline
+
+ static LFDS700_PAL_INLINE void lfds700_pal_barrier_compiler( void )
+ {
+ __asm__ __volatile__ ( "" : : : "memory" );
+ }
+
+ #define LFDS700_PAL_BARRIER_COMPILER_LOAD lfds700_pal_barrier_compiler()
+ #define LFDS700_PAL_BARRIER_COMPILER_STORE lfds700_pal_barrier_compiler()
+ #define LFDS700_PAL_BARRIER_COMPILER_FULL lfds700_pal_barrier_compiler()
+
+ #define LFDS700_PAL_BARRIER_PROCESSOR_LOAD __sync_synchronize()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_STORE __sync_synchronize()
+ #define LFDS700_PAL_BARRIER_PROCESSOR_FULL __sync_synchronize()
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds700_pal_atom_t \
+ original_compare; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = (lfds700_pal_atom_t) *(pointer_to_compare); \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *(pointer_to_compare) = __sync_val_compare_and_swap( pointer_to_destination, *(pointer_to_compare), new_destination ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (unsigned char) ( original_compare == (lfds700_pal_atom_t) *(pointer_to_compare) ); \
+ }
+
+ // TRD : ARM and x86 have DWCAS which we can get via GCC intrinsics
+ #if( defined __arm__ || defined __i686__ || defined __i586__ || defined __i486__ )
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ int long long unsigned \
+ original_destination; \
+ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_new_destination) != NULL ); */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ original_destination = __sync_val_compare_and_swap( (int long long unsigned volatile *) (pointer_to_destination), *(int long long unsigned *) (pointer_to_compare), *(int long long unsigned *) (pointer_to_new_destination) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_destination == *(int long long unsigned *) (pointer_to_compare) ); \
+ \
+ *(int long long unsigned *) (pointer_to_compare) = original_destination; \
+ }
+ #endif
+
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_exchange) != NULL ); */ \
+ \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ *( (lfds700_pal_atom_t *) pointer_to_exchange) = (lfds700_pal_atom_t) __sync_lock_test_and_set( pointer_to_destination, *(pointer_to_exchange) ); \
+ LFDS700_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && LFDS700_PAL_GCC_VERSION >= 473 )
+
+ /* TRD : GCC 4.7.3 and greater
+
+ __GNUC__ indicates GCC
+ LFDS700_PAL_GCC_VERSION indicates which version
+ - __atomic_thread_fence requires 4.7.3
+
+ GCC 4.7.3 introduced the better __atomic*() atomic intrinsics
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_COMPILER
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_compiler.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_COMPILER
+
+ #define LFDS700_PAL_COMPILER_STRING "GCC >= 4.7.3"
+
+ #define LFDS700_PAL_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS700_PAL_INLINE inline
+
+ // TRD : GCC >= 4.7.3 compiler barriers are built into the intrinsics
+ #define LFDS700_PAL_NO_COMPILER_BARRIERS
+
+ #define LFDS700_PAL_BARRIER_PROCESSOR_LOAD __atomic_thread_fence( __ATOMIC_ACQUIRE )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_STORE __atomic_thread_fence( __ATOMIC_RELEASE )
+ #define LFDS700_PAL_BARRIER_PROCESSOR_FULL __atomic_thread_fence( __ATOMIC_ACQ_REL )
+
+ #define LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ result = (char unsigned) __atomic_compare_exchange_n( pointer_to_destination, (void *) (pointer_to_compare), (new_destination), (cas_strength), __ATOMIC_RELAXED, __ATOMIC_RELAXED ); \
+ }
+
+ // TRD : ARM and x86 have DWCAS which we can get via GCC intrinsics
+ #if( defined __arm__ || defined __i686__ || defined __i586__ || defined __i486__ )
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_new_destination) != NULL ); */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ (result) = (char unsigned) __atomic_compare_exchange_n( (int long long unsigned volatile *) (pointer_to_destination), (int long long unsigned *) (pointer_to_compare), *(int long long unsigned *) (pointer_to_new_destination), (cas_strength), __ATOMIC_RELAXED, __ATOMIC_RELAXED ); \
+ }
+ #endif
+
+ #if( defined __x86_64__ )
+ /* TRD : __GNUC__ indicates GCC
+ - __asm__ requires GCC
+ - __volatile__ requires GCC
+ __x86_64__ indicates x64
+ - cmpxchg16b requires x64
+
+ On 64 bit platforms a DWCAS is 16 bytes wide, but unsigned long long int is only 64 bits,
+ so we must emit cmpxchg16b by hand, as __sync_val_compare_and_swap() on that type will
+ only emit an 8 byte compare-and-swap
+ */
+
+ // TRD : lfds700_pal_atom_t volatile (*destination)[2], lfds700_pal_atom_t (*compare)[2], lfds700_pal_atom_t (*new_destination)[2], enum lfds700_misc_cas_strength cas_strength, char unsigned result
+
+ #define LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_new_destination) != NULL ); */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ (result) = 0; \
+ \
+ __asm__ __volatile__ \
+ ( \
+ "lock;" /* make cmpxchg16b atomic */ \
+ "cmpxchg16b %0;" /* cmpxchg16b sets ZF on success */ \
+ "setz %3;" /* if ZF set, set result to 1 */ \
+ \
+ /* output */ \
+ : "+m" (*pointer_to_destination), "+a" ((pointer_to_compare)[0]), "+d" ((pointer_to_compare)[1]), "=q" (result) \
+ \
+ /* input */ \
+ : "b" ((pointer_to_new_destination)[0]), "c" ((pointer_to_new_destination)[1]) \
+ \
+ /* clobbered */ \
+ : "cc", "memory" \
+ ); \
+ }
+ #endif
+
+ #define LFDS700_PAL_ATOMIC_EXCHANGE( pointer_to_destination, pointer_to_exchange ) \
+ { \
+ /* LFDS700_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS700_PAL_ASSERT( (pointer_to_exchange) != NULL ); */ \
+ \
+ *(pointer_to_exchange) = __atomic_exchange_n( (pointer_to_destination), *(pointer_to_exchange), __ATOMIC_RELAXED ); \
+ }
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _MSC_VER )
+ /* TRD : MSVC compiler
+
+ an unfortunately necessary hack for MSVC
+ MSVC only defines __STDC__ if /Za is given, where /Za turns off MSVC C extensions -
+ which prevents Windows header files from compiling.
+ */
+
+ #define __STDC__ 1
+ #define __STDC_HOSTED__ 1
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 && __STDC_HOSTED__ == 1 && !defined _KERNEL_MODE )
+
+ // TRD : MSVC, Windows user-mode
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_operating_system.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+
+ #include <assert.h>
+
+ #define LFDS700_PAL_OS_STRING "Windows"
+ #define LFDS700_PAL_ASSERT( expression ) assert( expression )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 && defined __STDC_HOSTED__ && __STDC_HOSTED__ == 1 && defined _WIN32 && defined _KERNEL_MODE )
+
+ // TRD : MSVC, Windows kernel-mode
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_operating_system.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+
+ #include <assert.h>
+ #include <intrin.h>
+
+ #define LFDS700_PAL_OS_STRING "Windows"
+ #define LFDS700_PAL_ASSERT( expression ) assert( expression )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && __STDC_HOSTED__ == 1 && !(defined __linux__ && defined _KERNEL_MODE) )
+
+ // TRD : GCC, hosted implementation (except for Linux kernel mode)
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_operating_system.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+
+ #include <assert.h>
+
+ #define LFDS700_PAL_OS_STRING "Embedded (hosted)"
+ #define LFDS700_PAL_ASSERT( expression ) assert( expression )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && __STDC_HOSTED__ == 0 )
+
+ // TRD : GCC, freestanding or bare implementation
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_operating_system.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+
+ #define LFDS700_PAL_OS_STRING "Embedded (freestanding/bare)"
+ #define LFDS700_PAL_ASSERT( expression )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __linux__ && defined _KERNEL_MODE )
+
+ // TRD : GCC, Linux kernel-mode
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_operating_system.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM
+
+ #include <linux/module.h>
+
+ #define LFDS700_PAL_OS_STRING "Linux"
+ #define LFDS700_PAL_ASSERT( expression ) BUG_ON( expression )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LFDS700_PAL_PORTING_ABSTRACTION_LAYER_OPERATING_SYSTEM )
+
+ #error No matching porting abstraction layer in lfds700_porting_abstraction_layer_operating_system.h
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 && defined _M_IX86 )
+
+ /* TRD : MSVC, x86
+ x86 is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "x86"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 && (defined _M_X64 || defined _M_AMD64) )
+
+ /* TRD : MSVC, x64
+ x64 is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "x64"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 && defined _M_IA64 )
+
+ /* TRD : MSVC, Itanium
+ IA64 is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "IA64"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 && defined _M_ARM )
+
+ /* TRD : MSVC, 32-bit ARM
+
+ ARM is LL/SC and uses a reservation granule of 8 to 2048 bytes
+ so the isolation value used here is worst-case - be sure to set
+ this correctly, otherwise structures are painfully large
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "ARM (32-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 2048
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __arm__ )
+
+ /* TRD : GCC, 32-bit ARM
+
+ ARM is LL/SC and uses a reservation granule of 8 to 2048 bytes
+ so the isolation value used here is worst-case - be sure to set
+ this correctly, otherwise structures are painfully large
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "ARM (32-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 2048
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __aarch64__ )
+
+ /* TRD : GCC, 64-bit ARM
+
+ ARM is LL/SC and uses a reservation granule of 8 to 2048 bytes
+ so the isolation value used here is worst-case - be sure to set
+ this correctly, otherwise structures are painfully large
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "ARM (64-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 2048
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && (defined __i686__ || defined __i586__ || defined __i486__) )
+
+ /* TRD : GCC, x86
+
+ x86 is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "x86"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __x86_64__ )
+
+ /* TRD : GCC, x64
+
+ x64 is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "x64"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __alpha__ )
+
+ /* TRD : GCC, alpha
+
+ alpha is LL/SC, but there is only one reservation per processor,
+ so the isolation value used here is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "alpha"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __ia64__ )
+
+ /* TRD : GCC, Itanium
+
+ Itanium is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "IA64"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __mips__ )
+
+ /* TRD : GCC, MIPS (32-bit)
+
+ MIPS is LL/SC, but there is only one reservation per processor,
+ so the isolation value used here is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "MIPS (32-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __mips64 )
+
+ /* TRD : GCC, MIPS (64-bit)
+
+ MIPS is LL/SC, but there is only one reservation per processor,
+ so the isolation value used here is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "MIPS (64-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __ppc__ )
+
+ /* TRD : GCC, POWERPC (32-bit)
+
+ POWERPC is LL/SC and uses a reservation granule but I can't find
+ canonical documentation for its size - 128 bytes seems to be the
+ largest value I've found
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "POWERPC (32-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 128
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __ppc64__ )
+
+ /* TRD : GCC, POWERPC (64-bit)
+
+ POWERPC is LL/SC and uses a reservation granule but I can't find
+ canonical documentation for its size - 128 bytes seems to be the
+ largest value I've found
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "POWERPC (64-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 128
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __sparc__ && !defined __sparc_v9__ )
+
+ /* TRD : GCC, SPARC (32-bit)
+
+ SPARC is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "SPARC (32-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __sparc__ && defined __sparc_v9__ )
+
+ /* TRD : GCC, SPARC (64-bit)
+
+ SPARC is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long long unsigned lfds700_pal_atom_t;
+ typedef int long long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "SPARC (64-bit)"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 64
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __m68k__ )
+
+ /* TRD : GCC, 680x0
+
+ 680x0 is CAS, so isolation is cache-line length
+ */
+
+ #ifdef LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in lfds700_porting_abstraction_layer_processor.h
+ #endif
+
+ #define LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR
+
+ typedef int long unsigned lfds700_pal_atom_t;
+ typedef int long unsigned lfds700_pal_uint_t;
+
+ #define LFDS700_PAL_PROCESSOR_STRING "680x0"
+
+ #define LFDS700_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS700_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS700_PAL_CACHE_LINE_LENGTH_IN_BYTES 32
+ #define LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LFDS700_PAL_PORTING_ABSTRACTION_LAYER_PROCESSOR )
+
+ #error No matching porting abstraction layer in lfds700_porting_abstraction_layer_processor.h
+
+#endif
+
--- /dev/null
+/***** defines *****/
+#define LFDS700_QUEUE_GET_KEY_FROM_ELEMENT( queue_element ) ( (queue_element).key )
+#define LFDS700_QUEUE_SET_KEY_IN_ELEMENT( queue_element, new_key ) ( (queue_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( queue_element ) ( (queue_element).value )
+#define LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( queue_element, new_value ) ( (queue_element).value = (void *) (lfds700_pal_uint_t) (new_value) )
+#define LFDS700_QUEUE_GET_USER_STATE_FROM_STATE( queue_state ) ( (queue_state).user_state )
+
+/***** enums *****/
+enum lfds700_queue_query
+{
+ LFDS700_QUEUE_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_queue_element
+{
+ struct lfds700_queue_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile next[PAC_SIZE];
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *key;
+
+ void
+ *value;
+};
+
+struct lfds700_queue_state
+{
+ struct lfds700_queue_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile enqueue[PAC_SIZE],
+ *volatile dequeue[PAC_SIZE];
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_queue_init_valid_on_current_logical_core( struct lfds700_queue_state *qs,
+ struct lfds700_queue_element *qe_dummy,
+ struct lfds700_misc_prng_state *ps,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_queue_cleanup( struct lfds700_queue_state *qs,
+ void (*element_cleanup_callback)(struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag) );
+
+void lfds700_queue_enqueue( struct lfds700_queue_state *qs,
+ struct lfds700_queue_element *qe,
+ struct lfds700_misc_prng_state *ps );
+
+int lfds700_queue_dequeue( struct lfds700_queue_state *qs,
+ struct lfds700_queue_element **qe,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_queue_query( struct lfds700_queue_state *qs,
+ enum lfds700_queue_query query_type,
+ void *query_input,
+ void *query_output );
+
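+/* Example (illustrative sketch only) : a minimal single-threaded use of the
+   queue API above; it assumes the usual lfds700_misc library and PRNG
+   initialisation has already been performed by the caller, and that the
+   static state below satisfies the alignment the init function asserts
+
+   static struct lfds700_queue_state qs;
+   static struct lfds700_queue_element qe_dummy, qe;
+
+   void example_queue_usage( struct lfds700_misc_prng_state *ps )
+   {
+     struct lfds700_queue_element
+       *qe_out;
+
+     lfds700_queue_init_valid_on_current_logical_core( &qs, &qe_dummy, ps, NULL );
+
+     LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( qe, 42 );              // store an integer as the value
+     lfds700_queue_enqueue( &qs, &qe, ps );
+
+     if( lfds700_queue_dequeue(&qs, &qe_out, ps) )              // returns 0 when the queue is empty
+       (void) LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe_out );  // retrieves the 42 stored above
+
+     lfds700_queue_cleanup( &qs, NULL );                        // NULL cleanup callback assumed acceptable here
+   }
+*/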
--- /dev/null
+/***** defines *****/
+#define LFDS700_QUEUE_BSS_GET_USER_STATE_FROM_STATE( queue_bss_state ) ( (queue_bss_state).user_state )
+
+/***** enums *****/
+enum lfds700_queue_bss_query
+{
+ LFDS700_QUEUE_BSS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS700_QUEUE_BSS_QUERY_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_queue_bss_element
+{
+ void
+ *volatile key,
+ *volatile value;
+};
+
+struct lfds700_queue_bss_state
+{
+ lfds700_pal_uint_t
+ number_elements,
+ mask;
+
+ lfds700_pal_uint_t volatile
+ read_index,
+ write_index;
+
+ struct lfds700_queue_bss_element
+ *element_array;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_queue_bss_init_valid_on_current_logical_core( struct lfds700_queue_bss_state *qbsss,
+ struct lfds700_queue_bss_element *element_array,
+ lfds700_pal_uint_t number_elements,
+ void *user_state );
+ // TRD : number_elements must be a positive integer power of 2
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_queue_bss_cleanup( struct lfds700_queue_bss_state *qbsss,
+ void (*element_cleanup_callback)(struct lfds700_queue_bss_state *qbsss, void *key, void *value) );
+
+int lfds700_queue_bss_enqueue( struct lfds700_queue_bss_state *qbsss,
+ void *key,
+ void *value );
+
+int lfds700_queue_bss_dequeue( struct lfds700_queue_bss_state *qbsss,
+ void **key,
+ void **value );
+
+void lfds700_queue_bss_query( struct lfds700_queue_bss_state *qbsss,
+ enum lfds700_queue_bss_query query_type,
+ void *query_input,
+ void *query_output );
+
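+/* Example (illustrative sketch only) : a minimal use of the bounded queue
+   above; number_elements must be a positive integer power of 2, and the
+   element array is provided by the caller
+
+   static struct lfds700_queue_bss_state qbsss;
+   static struct lfds700_queue_bss_element element_array[8];
+
+   void example_queue_bss_usage( void )
+   {
+     void
+       *key,
+       *value;
+
+     lfds700_queue_bss_init_valid_on_current_logical_core( &qbsss, element_array, 8, NULL );
+
+     lfds700_queue_bss_enqueue( &qbsss, NULL, (void *) (lfds700_pal_uint_t) 42 );   // returns 0 if the queue is full
+
+     if( lfds700_queue_bss_dequeue(&qbsss, &key, &value) )                          // returns 0 if the queue is empty
+       (void) value;                                                                // value now holds the 42 enqueued above
+
+     lfds700_queue_bss_cleanup( &qbsss, NULL );
+   }
+*/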
--- /dev/null
+/***** defines *****/
+#define LFDS700_RINGBUFFER_GET_USER_STATE_FROM_STATE( ringbuffer_state ) ( (ringbuffer_state).user_state )
+
+/***** enums *****/
+enum lfds700_ringbuffer_query
+{
+ LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_ringbuffer_element
+{
+ struct lfds700_freelist_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ fe;
+
+ struct lfds700_queue_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ qe;
+
+ struct lfds700_queue_element
+ *qe_use; // TRD : hack for 7.0.0; we need a new queue with no dummy element
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds700_ringbuffer_state
+{
+ struct lfds700_freelist_state LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ fs;
+
+ struct lfds700_queue_state LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ qs;
+
+ void
+ (*element_cleanup_callback)( struct lfds700_ringbuffer_state *rs, void *key, void *value, enum lfds700_misc_flag unread_flag ),
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_ringbuffer_init_valid_on_current_logical_core( struct lfds700_ringbuffer_state *rs,
+ struct lfds700_ringbuffer_element *re_array_inc_dummy,
+ lfds700_pal_uint_t number_elements,
+ struct lfds700_misc_prng_state *ps,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_ringbuffer_cleanup( struct lfds700_ringbuffer_state *rs,
+ void (*element_cleanup_callback)(struct lfds700_ringbuffer_state *rs, void *key, void *value, enum lfds700_misc_flag unread_flag) );
+
+int lfds700_ringbuffer_read( struct lfds700_ringbuffer_state *rs,
+ void **key,
+ void **value,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_ringbuffer_write( struct lfds700_ringbuffer_state *rs,
+ void *key,
+ void *value,
+ enum lfds700_misc_flag *overwrite_occurred_flag,
+ void **overwritten_key,
+ void **overwritten_value,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_ringbuffer_query( struct lfds700_ringbuffer_state *rs,
+ enum lfds700_ringbuffer_query query_type,
+ void *query_input,
+ void *query_output );
+
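+/* Example (illustrative sketch only) : a minimal single-threaded use of the
+   ringbuffer above; the element array passed to init includes the dummy
+   element, so ten elements here are assumed to give nine usable slots; the
+   usual lfds700_misc library and PRNG initialisation is assumed done
+
+   static struct lfds700_ringbuffer_state rs;
+   static struct lfds700_ringbuffer_element re_array[10];
+
+   void example_ringbuffer_usage( struct lfds700_misc_prng_state *ps )
+   {
+     enum lfds700_misc_flag
+       overwrite_occurred_flag;
+
+     void
+       *key,
+       *value,
+       *overwritten_key,
+       *overwritten_value;
+
+     lfds700_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, 10, ps, NULL );
+
+     lfds700_ringbuffer_write( &rs, NULL, (void *) (lfds700_pal_uint_t) 42,
+                               &overwrite_occurred_flag, &overwritten_key, &overwritten_value, ps );
+
+     if( lfds700_ringbuffer_read(&rs, &key, &value, ps) )   // returns 0 when the ringbuffer is empty
+       (void) value;                                        // value now holds the 42 written above
+
+     lfds700_ringbuffer_cleanup( &rs, NULL );
+   }
+*/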
--- /dev/null
+/***** defines *****/
+#define LFDS700_STACK_GET_KEY_FROM_ELEMENT( stack_element ) ( (stack_element).key )
+#define LFDS700_STACK_SET_KEY_IN_ELEMENT( stack_element, new_key ) ( (stack_element).key = (void *) (lfds700_pal_uint_t) (new_key) )
+#define LFDS700_STACK_GET_VALUE_FROM_ELEMENT( stack_element ) ( (stack_element).value )
+#define LFDS700_STACK_SET_VALUE_IN_ELEMENT( stack_element, new_value ) ( (stack_element).value = (void *) (lfds700_pal_uint_t) (new_value) )
+#define LFDS700_STACK_GET_USER_STATE_FROM_STATE( stack_state ) ( (stack_state).user_state )
+
+/***** enums *****/
+enum lfds700_stack_query
+{
+ LFDS700_STACK_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds700_stack_element
+{
+ struct lfds700_stack_element
+ *volatile next;
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds700_stack_state
+{
+ struct lfds700_stack_element LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile top[PAC_SIZE];
+
+ void LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds700_stack_init_valid_on_current_logical_core( struct lfds700_stack_state *ss, void *user_state );
+ // TRD : used in conjunction with the #define LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds700_stack_cleanup( struct lfds700_stack_state *ss,
+ void (*element_cleanup_callback)(struct lfds700_stack_state *ss, struct lfds700_stack_element *se) );
+
+void lfds700_stack_push( struct lfds700_stack_state *ss,
+ struct lfds700_stack_element *se,
+ struct lfds700_misc_prng_state *ps );
+
+int lfds700_stack_pop( struct lfds700_stack_state *ss,
+ struct lfds700_stack_element **se,
+ struct lfds700_misc_prng_state *ps );
+
+void lfds700_stack_query( struct lfds700_stack_state *ss,
+ enum lfds700_stack_query query_type,
+ void *query_input,
+ void *query_output );
+
+
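+/* Example (illustrative sketch only) : a minimal single-threaded use of the
+   stack above, with the usual lfds700_misc library and PRNG initialisation
+   assumed to have been performed by the caller
+
+   static struct lfds700_stack_state ss;
+   static struct lfds700_stack_element se;
+
+   void example_stack_usage( struct lfds700_misc_prng_state *ps )
+   {
+     struct lfds700_stack_element
+       *se_out;
+
+     lfds700_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+     LFDS700_STACK_SET_VALUE_IN_ELEMENT( se, 42 );
+     lfds700_stack_push( &ss, &se, ps );
+
+     if( lfds700_stack_pop(&ss, &se_out, ps) )                  // returns 0 when the stack is empty
+       (void) LFDS700_STACK_GET_VALUE_FROM_ELEMENT( *se_out );  // retrieves the 42 pushed above
+
+     lfds700_stack_cleanup( &ss, NULL );
+   }
+*/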
--- /dev/null
+/***** includes *****/
+#include "lfds700_btree_addonly_unbalanced_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_btree_au_cleanup( struct lfds700_btree_au_state *baus,
+ void (*element_cleanup_callback)(struct lfds700_btree_au_state *baus, struct lfds700_btree_au_element *baue) )
+{
+ enum lfds700_btree_au_delete_action
+ delete_action = LFDS700_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct lfds700_btree_au_element
+ *baue;
+
+ struct lfds700_btree_au_element
+ *temp;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ // TRD : element_delete_function can be NULL
+
+ /* TRD : we're not lock-free now, so delete at will
+ but be iterative, so can be used in kernels (where there's little stack)
+ and be performant, since the user may be
+ creating/destroying many of these trees
+ also remember the user may be deallocating user data
+ so we cannot visit an element twice
+
+ we start at the root and iterate till we go to NULL
+ if the element has zero children, we delete it and move up to its parent
+ if the element has one child, we delete it, move its child into its place, and continue from its child
+ if the element has two children, we move left
+
+ the purpose of this is to minimize walking around the tree
+ to prevent visiting an element twice
+ while also minimizing code complexity
+ */
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lfds700_btree_au_get_by_absolute_position( baus, &baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LFDS700_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LFDS700_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LFDS700_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LFDS700_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LFDS700_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ lfds700_btree_au_get_by_relative_position( &baue, LFDS700_BTREE_AU_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LFDS700_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ lfds700_btree_au_get_by_relative_position( &baue, LFDS700_BTREE_AU_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LFDS700_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ lfds700_btree_au_get_by_relative_position( &baue, LFDS700_BTREE_AU_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LFDS700_BTREE_AU_DELETE_MOVE_LEFT:
+ lfds700_btree_au_get_by_relative_position( &baue, LFDS700_BTREE_AU_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_btree_addonly_unbalanced_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct lfds700_btree_au_element **baue );
+static void lfds700_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct lfds700_btree_au_element **baue );
+
+
+
+
+
+/****************************************************************************/
+int lfds700_btree_au_get_by_key( struct lfds700_btree_au_state *baus,
+ void *key,
+ struct lfds700_btree_au_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ // TRD : key can be NULL
+ LFDS700_PAL_ASSERT( baue != NULL );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ *baue = baus->root;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = baus->key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ {
+ *baue = (*baue)->left;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+
+ if( compare_result > 0 )
+ {
+ *baue = (*baue)->right;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ }
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds700_btree_au_get_by_absolute_position( struct lfds700_btree_au_state *baus, struct lfds700_btree_au_element **baue, enum lfds700_btree_au_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ LFDS700_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ *baue = baus->root;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( absolute_position )
+ {
+ case LFDS700_BTREE_AU_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LFDS700_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ {
+ *baue = (*baue)->right;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ break;
+
+ case LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ {
+ *baue = (*baue)->left;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ break;
+ }
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds700_btree_au_get_by_relative_position( struct lfds700_btree_au_element **baue, enum lfds700_btree_au_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS700_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return( 0 );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( relative_position )
+ {
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ // TRD : no load barrier - up already existed, so is known to be safely propagated
+ break;
+
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ LFDS700_MISC_BARRIER_LOAD;
+ break;
+
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ LFDS700_MISC_BARRIER_LOAD;
+ break;
+
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ {
+ LFDS700_MISC_BARRIER_LOAD;
+ while( (*baue)->right != NULL )
+ {
+ *baue = (*baue)->right;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ {
+ LFDS700_MISC_BARRIER_LOAD;
+ while( (*baue)->left != NULL )
+ {
+ *baue = (*baue)->left;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ lfds700_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baue );
+ break;
+
+ case LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ lfds700_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baue );
+ break;
+ }
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct lfds700_btree_au_element **baue )
+{
+ enum lfds700_btree_au_move
+ action = LFDS700_BTREE_AU_MOVE_INVALID;
+
+ enum lfds700_misc_flag
+ finished_flag = LFDS700_MISC_FLAG_LOWERED,
+ load_finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ struct lfds700_btree_au_element
+ *left = NULL,
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS700_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is;
+ 1. if we have a left, it's the largest element on the right branch of our left child
+ 2. if we don't have a left, and we're on the right of our parent, then it's our parent
+ 3. if we don't have a left, and we're on the left of our parent or we have no parent,
+ iterate up the tree until we find the first child who is on the right of its parent; then it's the parent
+ */
+
+ /* TRD : we need to ensure the variables we use to decide our action are self-consistent
+ to do this, we make local copies of them all
+ then, if they are all not NULL, we can know they cannot change and we can continue
+ if however any of them are NULL, they could have changed while we were reading
+ and so our variables could be non-self-consistent
+ to check for this, we issue another processor read barrier
+ and then compare our local variables with the values in the tree
+ if they all match, then we know our variable set is self-consistent
+ (even though it may now be wrong - but we will discover this when we try the atomic operation)
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ while( load_finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ left = (*baue)->left;
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL and right != NULL and (up == NULL or (up != NULL and up_left != NULL and up_right != NULL)) )
+ break;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( left == (*baue)->left and right == (*baue)->right and (up == NULL or (up != NULL and up == (*baue)->up and up_left == (*baue)->up->left and up_right == (*baue)->up->right)) )
+ load_finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ if( left != NULL )
+ action = LFDS700_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LFDS700_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LFDS700_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LFDS700_BTREE_AU_MOVE_INVALID:
+ case LFDS700_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LFDS700_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ {
+ LFDS700_MISC_BARRIER_LOAD;
+ while( (*baue)->right != NULL )
+ {
+ *baue = (*baue)->right;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS700_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LFDS700_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ load_finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ while( load_finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( up == NULL or (up != NULL and up_left != NULL) )
+ break;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( up == (*baue)->up and up_left == (*baue)->up->left )
+ load_finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ *baue = up;
+
+ /*
+
+ while( *baue != NULL and (*baue)->up != NULL and *baue == (*baue)->up->left )
+ *baue = (*baue)->up;
+
+ *baue = (*baue)->up;
+
+ */
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct lfds700_btree_au_element **baue )
+{
+ enum lfds700_btree_au_move
+ action = LFDS700_BTREE_AU_MOVE_INVALID;
+
+ enum lfds700_misc_flag
+ finished_flag = LFDS700_MISC_FLAG_LOWERED,
+ load_finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ struct lfds700_btree_au_element
+ *left = NULL,
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS700_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next largest element is;
+ 1. if we have a right, it's the smallest element on the left branch of our right child
+ 2. if we don't have a right, and we're on the left of our parent, then it's our parent
+ 3. if we don't have a right, and we're on the right of our parent or we have no parent,
+ iterate up the tree until we find the first child who is on the left of its parent; then it's the parent
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ while( load_finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ left = (*baue)->left;
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL and right != NULL and (up == NULL or (up != NULL and up_left != NULL and up_right != NULL)) )
+ break;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( left == (*baue)->left and right == (*baue)->right and (up == NULL or (up != NULL and up == (*baue)->up and up_left == (*baue)->up->left and up_right == (*baue)->up->right)) )
+ load_finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ if( right != NULL )
+ action = LFDS700_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LFDS700_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LFDS700_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LFDS700_BTREE_AU_MOVE_INVALID:
+ case LFDS700_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LFDS700_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ {
+ LFDS700_MISC_BARRIER_LOAD;
+ while( (*baue)->left != NULL )
+ {
+ *baue = (*baue)->left;
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS700_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LFDS700_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ load_finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ while( load_finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( up == NULL or (up != NULL and up_right != NULL) )
+ break;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( up == (*baue)->up and up_right == (*baue)->up->right )
+ load_finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ *baue = up;
+
+ /*
+
+ while( *baue != NULL and (*baue)->up != NULL and *baue == (*baue)->up->right )
+ *baue = (*baue)->up;
+
+ *baue = (*baue)->up;
+
+ */
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position( struct lfds700_btree_au_state *baus, struct lfds700_btree_au_element **baue, enum lfds700_btree_au_absolute_position absolute_position, enum lfds700_btree_au_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ LFDS700_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = lfds700_btree_au_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = lfds700_btree_au_get_by_relative_position( baue, relative_position );
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_btree_addonly_unbalanced_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_btree_au_init_valid_on_current_logical_core( struct lfds700_btree_au_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds700_btree_au_existing_key existing_key,
+ void *user_state )
+{
+ LFDS700_PAL_ASSERT( baus != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baus->root % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baus->key_compare_function % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
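+/* Example (illustrative sketch only) : one possible key_compare_function for
+   the init function above, for the common case where keys are integers cast
+   into the void pointers; it must return negative, zero or positive, in the
+   manner of strcmp
+
+   static int example_key_compare_function( void const *new_key, void const *existing_key )
+   {
+     lfds700_pal_uint_t
+       nk = (lfds700_pal_uint_t) new_key,
+       ek = (lfds700_pal_uint_t) existing_key;
+
+     if( nk < ek )
+       return( -1 );
+
+     if( nk > ek )
+       return( 1 );
+
+     return( 0 );
+   }
+*/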
--- /dev/null
+/***** includes *****/
+#include "lfds700_btree_addonly_unbalanced_internal.h"
+
+
+
+
+
+/****************************************************************************/
+enum lfds700_btree_au_insert_result lfds700_btree_au_insert( struct lfds700_btree_au_state *baus,
+ struct lfds700_btree_au_element *baue,
+ struct lfds700_btree_au_element **existing_baue,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result = 0;
+
+ int
+ compare_result = 0;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_btree_au_element
+ *volatile compare = NULL,
+ *volatile baue_next = NULL,
+ *volatile baue_parent = NULL,
+ *volatile baue_temp;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ LFDS700_PAL_ASSERT( baue != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baue->left % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baue->right % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baue->up % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baue->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &baue->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : existing_baue can be NULL
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ /* TRD : we follow a normal search to find the insert node and which side to insert on
+
+ the difference is that insertion may fail because someone else inserts
+ there before we do
+
+ in this case, we resume searching for the insert node from the node
+ we were attempting to insert upon
+
+ (if we attempted to insert the root node and this failed, i.e. we thought
+ the tree was empty but then it wasn't, then we start searching from the
+ new root)
+ */
+
+ baue->up = baue->left = baue->right = NULL;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ baue_temp = baus->root;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ while( result == 0 )
+ {
+ // TRD : first we find where to insert
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE:
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( *baue_temp, baue->value );
+ return( LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE );
+ break;
+
+ case LFDS700_BTREE_AU_EXISTING_KEY_FAIL:
+ return( LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ if( baue_temp != NULL )
+ LFDS700_MISC_BARRIER_LOAD;
+ }
+
+ /* TRD : second, we actually insert
+
+ at this point baue_temp has come to NULL
+ and baue_parent is the element to insert at
+ and the result of the last compare indicates
+ the direction of insertion
+
+ it may be that another thread has already inserted an element with
+ the same key as ourselves, or other elements which mean our position
+ is now wrong
+
+ in this case, it is either inserted in the position we're trying
+ to insert in now, in which case our insert will fail
+
+ or, similarly, other elements will have come in where we are,
+ and our insert will fail
+ */
+
+ if( baue_parent == NULL )
+ {
+ compare = NULL;
+ baue->up = baus->root;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &baus->root, &compare, baue, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+
+ if( result == 0 )
+ baue_temp = baus->root;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ compare = NULL;
+ baue->up = baue_parent;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &baue_parent->left, &compare, baue, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+
+ if( compare_result > 0 )
+ {
+ compare = NULL;
+ baue->up = baue_parent;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &baue_parent->right, &compare, baue, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+
+ // TRD : if the insert fails, resume searching at the insert node
+ if( result == 0 )
+ baue_temp = baue_parent;
+ }
+ }
+
+ // TRD : if we get to here, we added (not failed or overwrite on exist) a new element
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return( LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS );
+}
+
--- /dev/null
+/***** the library-wide header file *****/
+#include "../liblfds700_internal.h"
+
+/***** enums *****/
+enum lfds700_btree_au_move
+{
+ LFDS700_BTREE_AU_MOVE_INVALID,
+ LFDS700_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD,
+ LFDS700_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD,
+ LFDS700_BTREE_AU_MOVE_GET_PARENT,
+ LFDS700_BTREE_AU_MOVE_MOVE_UP_TREE
+};
+
+enum lfds700_btree_au_delete_action
+{
+ LFDS700_BTREE_AU_DELETE_SELF,
+ LFDS700_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD,
+ LFDS700_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD,
+ LFDS700_BTREE_AU_DELETE_MOVE_LEFT
+};
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_btree_addonly_unbalanced_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_btree_au_internal_validate( struct lfds700_btree_au_state *abs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_btree_au_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_btree_au_query( struct lfds700_btree_au_state *baus, enum lfds700_btree_au_query query_type, void *query_input, void *query_output )
+{
+ LFDS700_PAL_ASSERT( baus != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS700_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds700_btree_au_element
+ *baue = NULL;
+
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ while( lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position(baus, &baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS700_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_btree_au_internal_validate( baus, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_btree_au_internal_validate( struct lfds700_btree_au_state *baus, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_btree_au_validity )
+{
+ lfds700_pal_uint_t
+ number_elements_from_query_tree = 0,
+ number_elements_from_walk = 0;
+
+ struct lfds700_btree_au_element
+ *baue = NULL,
+ *baue_prev = NULL;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_btree_au_validity != NULL );
+
+ *lfds700_btree_au_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : validation is performed by;
+
+ performing an in-order walk
+ we should see every element is larger than the preceding element
+ we count elements as we go along (visited elements, that is)
+ and check our tally equals the expected count
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ while( lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position(baus, &baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ // TRD : baue_prev should always be smaller than or equal to baue
+ if( baue_prev != NULL )
+ if( baus->key_compare_function(baue_prev->key, baue->key) > 0 )
+ {
+ *lfds700_btree_au_validity = LFDS700_MISC_VALIDITY_INVALID_ORDER;
+ return;
+ }
+
+ baue_prev = baue;
+ number_elements_from_walk++;
+ }
+
+ if( *lfds700_btree_au_validity == LFDS700_MISC_VALIDITY_VALID )
+ {
+ lfds700_btree_au_query( (struct lfds700_btree_au_state *) baus, LFDS700_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements_from_query_tree );
+
+ if( number_elements_from_walk > number_elements_from_query_tree )
+ *lfds700_btree_au_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( number_elements_from_walk < number_elements_from_query_tree )
+ *lfds700_btree_au_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ }
+
+ /* TRD : now check for the expected number of elements
+ vi can be NULL, in which case we do not check
+ */
+
+ if( *lfds700_btree_au_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds700_btree_au_query( (struct lfds700_btree_au_state *) baus, LFDS700_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements_from_query_tree );
+
+ if( number_elements_from_query_tree < vi->min_elements )
+ *lfds700_btree_au_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements_from_query_tree > vi->max_elements )
+ *lfds700_btree_au_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_freelist_cleanup( struct lfds700_freelist_state *fs,
+ void (*element_cleanup_callback)(struct lfds700_freelist_state *fs, struct lfds700_freelist_element *fe) )
+{
+ struct lfds700_freelist_element
+ *fe,
+ *fe_temp;
+
+ LFDS700_PAL_ASSERT( fs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ {
+ fe = fs->top[POINTER];
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_cleanup_callback( fs, fe_temp );
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_freelist_init_valid_on_current_logical_core( struct lfds700_freelist_state *fs, void *user_state )
+{
+ LFDS700_PAL_ASSERT( fs != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) fs->top % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &fs->user_state % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : user_state can be NULL
+
+ fs->top[POINTER] = NULL;
+ fs->top[COUNTER] = 0;
+
+ fs->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_freelist_pop( struct lfds700_freelist_state *fs, struct lfds700_freelist_element **fe, struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_freelist_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS700_PAL_ASSERT( fs != NULL );
+ LFDS700_PAL_ASSERT( fe != NULL );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ LFDS700_PAL_BARRIER_PROCESSOR_LOAD;
+
+ original_top[COUNTER] = fs->top[COUNTER];
+ original_top[POINTER] = fs->top[POINTER];
+
+ do
+ {
+ if( original_top[POINTER] == NULL )
+ {
+ *fe = NULL;
+ return( 0 );
+ }
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ new_top[POINTER] = original_top[POINTER]->next;
+
+ LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( &fs->top, original_top, new_top, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+
+ if( result != 1 )
+ LFDS700_PAL_BARRIER_PROCESSOR_LOAD;
+ }
+ while( result != 1 );
+
+ *fe = original_top[POINTER];
+
+ return( 1 );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_freelist_push( struct lfds700_freelist_state *fs, struct lfds700_freelist_element *fe, struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_freelist_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS700_PAL_ASSERT( fs != NULL );
+ LFDS700_PAL_ASSERT( fe != NULL );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ new_top[POINTER] = fe;
+
+ original_top[COUNTER] = fs->top[COUNTER];
+ original_top[POINTER] = fs->top[POINTER];
+
+ do
+ {
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ fe->next = original_top[POINTER];
+
+ LFDS700_PAL_BARRIER_PROCESSOR_STORE;
+ LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( &fs->top, original_top, new_top, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+ while( result != 1 );
+
+ return;
+}
+
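+/* Example (illustrative sketch only) : a minimal single-threaded push and pop
+   using the functions above; the usual lfds700_misc library and PRNG
+   initialisation is assumed done by the caller, and in real use the freelist
+   element would normally be embedded in a caller-defined structure
+
+   static struct lfds700_freelist_state fs;
+   static struct lfds700_freelist_element fe;
+
+   void example_freelist_usage( struct lfds700_misc_prng_state *ps )
+   {
+     struct lfds700_freelist_element
+       *fe_out;
+
+     lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+
+     lfds700_freelist_push( &fs, &fe, ps );
+
+     if( lfds700_freelist_pop(&fs, &fe_out, ps) )   // returns 0 when the freelist is empty
+       (void) fe_out;                               // fe_out now points at the element pushed above
+
+     lfds700_freelist_cleanup( &fs, NULL );
+   }
+*/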
--- /dev/null
+/***** includes *****/
+#include "lfds700_freelist_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_freelist_internal_freelist_validate( struct lfds700_freelist_state *fs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_freelist_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_freelist_query( struct lfds700_freelist_state *fs, enum lfds700_freelist_query query_type, void *query_input, void *query_output )
+{
+ struct lfds700_freelist_element
+ *fe;
+
+ LFDS700_PAL_ASSERT( fs != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS700_FREELIST_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ fe = (struct lfds700_freelist_element *) fs->top[POINTER];
+
+ while( fe != NULL )
+ {
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ fe = (struct lfds700_freelist_element *) fe->next;
+ }
+ break;
+
+ case LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_freelist_internal_freelist_validate( fs, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_freelist_internal_freelist_validate( struct lfds700_freelist_state *fs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_freelist_validity )
+{
+ lfds700_pal_uint_t
+ number_elements = 0;
+
+ struct lfds700_freelist_element
+ *fe_slow,
+ *fe_fast;
+
+ LFDS700_PAL_ASSERT( fs != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_freelist_validity != NULL );
+
+ *lfds700_freelist_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ fe_slow = fe_fast = (struct lfds700_freelist_element *) fs->top[POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the freelist
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the freelist)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( fe_slow != NULL )
+ do
+ {
+ fe_slow = fe_slow->next;
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next;
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next;
+ }
+ while( fe_slow != NULL and fe_fast != fe_slow );
+
+ if( fe_fast != NULL and fe_slow != NULL and fe_fast == fe_slow )
+ *lfds700_freelist_validity = LFDS700_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds700_freelist_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds700_freelist_query( fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds700_freelist_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds700_freelist_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_hash_addonly_internal.h"
+
+/***** private prototypes *****/
+static void btree_au_element_cleanup_function( struct lfds700_btree_au_state *baus, struct lfds700_btree_au_element *baue );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_hash_a_cleanup( struct lfds700_hash_a_state *has,
+ void (*element_cleanup_callback)(struct lfds700_hash_a_state *has, struct lfds700_hash_a_element *hae) )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ LFDS700_PAL_ASSERT( has != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ has->element_cleanup_callback = element_cleanup_callback;
+
+ for( loop = 0 ; loop < has->array_size ; loop++ )
+ lfds700_btree_au_cleanup( has->baus_array+loop, btree_au_element_cleanup_function );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void btree_au_element_cleanup_function( struct lfds700_btree_au_state *baus, struct lfds700_btree_au_element *baue )
+{
+ struct lfds700_hash_a_state
+ *has;
+
+ struct lfds700_hash_a_element
+ *hae;
+
+ LFDS700_PAL_ASSERT( baus != NULL );
+ LFDS700_PAL_ASSERT( baue != NULL );
+
+ hae = (struct lfds700_hash_a_element *) LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+ has = (struct lfds700_hash_a_state *) LFDS700_BTREE_AU_GET_USER_STATE_FROM_STATE( *baus );
+
+ has->element_cleanup_callback( has, hae );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_hash_a_get_by_key( struct lfds700_hash_a_state *has,
+ void *key,
+ struct lfds700_hash_a_element **hae )
+{
+ int
+ rv;
+
+ lfds700_pal_uint_t
+ hash = 0;
+
+ struct lfds700_btree_au_element
+ *baue;
+
+ LFDS700_PAL_ASSERT( has != NULL );
+ // TRD : key can be NULL
+ LFDS700_PAL_ASSERT( hae != NULL );
+
+ has->key_hash_function( key, &hash );
+
+ rv = lfds700_btree_au_get_by_key( has->baus_array + (hash % has->array_size), key, &baue );
+
+ if( rv == 1 )
+ *hae = LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+ else
+ *hae = NULL;
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_hash_a_init_valid_on_current_logical_core( struct lfds700_hash_a_state *has,
+ struct lfds700_btree_au_state *baus_array,
+ lfds700_pal_uint_t array_size,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void (*key_hash_function)(void const *key, lfds700_pal_uint_t *hash),
+ enum lfds700_hash_a_existing_key existing_key,
+ void *user_state )
+{
+ enum lfds700_btree_au_existing_key
+ btree_au_existing_key = LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE; // TRD : for compiler warning
+
+ lfds700_pal_uint_t
+ loop;
+
+ LFDS700_PAL_ASSERT( has != NULL );
+ LFDS700_PAL_ASSERT( baus_array != NULL );
+ LFDS700_PAL_ASSERT( array_size > 0 );
+ LFDS700_PAL_ASSERT( key_compare_function != NULL );
+ LFDS700_PAL_ASSERT( key_hash_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ has->array_size = array_size;
+ has->key_compare_function = key_compare_function;
+ has->key_hash_function = key_hash_function;
+ has->existing_key = existing_key;
+ has->baus_array = baus_array;
+ has->user_state = user_state;
+
+ if( has->existing_key == LFDS700_HASH_A_EXISTING_KEY_OVERWRITE )
+ btree_au_existing_key = LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE;
+
+ if( has->existing_key == LFDS700_HASH_A_EXISTING_KEY_FAIL )
+ btree_au_existing_key = LFDS700_BTREE_AU_EXISTING_KEY_FAIL;
+
+ // TRD : since the addonly_hash atomic counts, if that flag is set, the btree_addonly_unbalanceds don't have to
+ for( loop = 0 ; loop < array_size ; loop++ )
+ lfds700_btree_au_init_valid_on_current_logical_core( has->baus_array+loop, key_compare_function, btree_au_existing_key, user_state );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
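+/* Example (illustrative sketch only) : one possible key_hash_function for the
+   init function above, plus an init call over a ten-entry btree array; keys
+   are assumed to be integers cast into the void pointers, and the compare
+   function follows the same convention as for the btree_addonly_unbalanced
+
+   static void example_key_hash_function( void const *key, lfds700_pal_uint_t *hash )
+   {
+     *hash = (lfds700_pal_uint_t) key;   // trivially, the integer key hashes to itself
+     return;
+   }
+
+   static struct lfds700_hash_a_state has;
+   static struct lfds700_btree_au_state baus_array[10];
+
+   void example_hash_a_init( int (*key_compare_function)(void const *new_key, void const *existing_key) )
+   {
+     lfds700_hash_a_init_valid_on_current_logical_core( &has, baus_array, 10,
+                                                        key_compare_function,
+                                                        example_key_hash_function,
+                                                        LFDS700_HASH_A_EXISTING_KEY_FAIL,
+                                                        NULL );
+     return;
+   }
+*/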
--- /dev/null
+/***** includes *****/
+#include "lfds700_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+enum lfds700_hash_a_insert_result lfds700_hash_a_insert( struct lfds700_hash_a_state *has,
+ struct lfds700_hash_a_element *hae,
+ struct lfds700_hash_a_element **existing_hae,
+ struct lfds700_misc_prng_state *ps )
+{
+ enum lfds700_hash_a_insert_result
+ apr = LFDS700_HASH_A_PUT_RESULT_SUCCESS;
+
+ enum lfds700_btree_au_insert_result
+ alr;
+
+ lfds700_pal_uint_t
+ hash = 0;
+
+ struct lfds700_btree_au_element
+ *existing_baue;
+
+ LFDS700_PAL_ASSERT( has != NULL );
+ LFDS700_PAL_ASSERT( hae != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &hae->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : existing_hae can be NULL
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ // TRD : alignment checks
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &hae->baue % LFDS700_PAL_ALIGN_SINGLE_POINTER == 0 );
+
+ has->key_hash_function( hae->key, &hash );
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( hae->baue, hae->key );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( hae->baue, hae );
+
+ alr = lfds700_btree_au_insert( has->baus_array + (hash % has->array_size), &hae->baue, &existing_baue, ps );
+
+ switch( alr )
+ {
+ case LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY:
+ if( existing_hae != NULL )
+ *existing_hae = LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( *existing_baue );
+
+ apr = LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+
+ case LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE:
+ apr = LFDS700_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS:
+ apr = LFDS700_HASH_A_PUT_RESULT_SUCCESS;
+ break;
+ }
+
+ return( apr );
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_hash_a_iterate_init( struct lfds700_hash_a_state *has, struct lfds700_hash_a_iterate *hai )
+{
+ LFDS700_PAL_ASSERT( has != NULL );
+ LFDS700_PAL_ASSERT( hai != NULL );
+
+ hai->baus = has->baus_array;
+ hai->baus_end = has->baus_array + has->array_size;
+ hai->baue = NULL;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds700_hash_a_iterate( struct lfds700_hash_a_iterate *hai, struct lfds700_hash_a_element **hae )
+{
+ enum lfds700_misc_flag
+ finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ int
+ rv = 0;
+
+ LFDS700_PAL_ASSERT( hai != NULL );
+ LFDS700_PAL_ASSERT( hae != NULL );
+
+ while( finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position( hai->baus, &hai->baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE );
+
+ if( hai->baue != NULL )
+ {
+ *hae = LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( *hai->baue );
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ rv = 1;
+ }
+
+ if( hai->baue == NULL )
+ if( ++hai->baus == hai->baus_end )
+ {
+ *hae = NULL;
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+ }
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_hash_addonly_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_hash_a_internal_validate( struct lfds700_hash_a_state *has, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_hash_a_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_hash_a_query( struct lfds700_hash_a_state *has, enum lfds700_hash_a_query query_type, void *query_input, void *query_output )
+{
+ LFDS700_PAL_ASSERT( has != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS700_HASH_A_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds700_hash_a_iterate
+ ai;
+
+ struct lfds700_hash_a_element
+ *hae;
+
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ lfds700_hash_a_iterate_init( has, &ai );
+
+ while( lfds700_hash_a_iterate(&ai, &hae) )
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS700_HASH_A_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_hash_a_internal_validate( has, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_hash_a_internal_validate( struct lfds700_hash_a_state *has, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_hash_a_validity )
+{
+ lfds700_pal_uint_t
+ lfds700_hash_a_total_number_elements = 0,
+ lfds700_btree_au_total_number_elements = 0,
+ number_elements;
+
+ lfds700_pal_uint_t
+ loop;
+
+ LFDS700_PAL_ASSERT( has != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_hash_a_validity != NULL );
+
+ /* TRD : validate every btree_addonly_unbalanced in the addonly_hash
+ sum elements in each btree_addonly_unbalanced
+ check matches expected element counts (if vi is provided)
+ */
+
+ *lfds700_hash_a_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ for( loop = 0 ; *lfds700_hash_a_validity == LFDS700_MISC_VALIDITY_VALID and loop < has->array_size ; loop++ )
+ lfds700_btree_au_query( has->baus_array+loop, LFDS700_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, NULL, (void *) lfds700_hash_a_validity );
+
+ if( *lfds700_hash_a_validity == LFDS700_MISC_VALIDITY_VALID )
+ {
+ for( loop = 0 ; loop < has->array_size ; loop++ )
+ {
+ lfds700_btree_au_query( has->baus_array+loop, LFDS700_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+ lfds700_btree_au_total_number_elements += number_elements;
+ }
+
+ // TRD : first, check btree_addonly_unbalanced total vs the addonly_hash total
+ lfds700_hash_a_query( has, LFDS700_HASH_A_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &lfds700_hash_a_total_number_elements );
+
+ // TRD : the btree_addonly_unbalanceds are assumed to speak the truth
+ if( lfds700_hash_a_total_number_elements < lfds700_btree_au_total_number_elements )
+ *lfds700_hash_a_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( lfds700_hash_a_total_number_elements > lfds700_btree_au_total_number_elements )
+ *lfds700_hash_a_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ // TRD : second, if we're still valid and vi is provided, check the btree_addonly_unbalanced total against vi
+ if( *lfds700_hash_a_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ if( lfds700_btree_au_total_number_elements < vi->min_elements )
+ *lfds700_hash_a_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( lfds700_btree_au_total_number_elements > vi->max_elements )
+ *lfds700_hash_a_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_ordered_singlylinked_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_aos_cleanup( struct lfds700_list_aos_state *laoss,
+ void (*element_cleanup_callback)(struct lfds700_list_aos_state *laoss, struct lfds700_list_aos_element *laose) )
+{
+ struct lfds700_list_aos_element
+ *laose,
+ *temp;
+
+ LFDS700_PAL_ASSERT( laoss != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ laose = LFDS700_LIST_AOS_GET_START( *laoss );
+
+ while( laose != NULL )
+ {
+ temp = laose;
+
+ laose = LFDS700_LIST_AOS_GET_NEXT( *laose );
+
+ element_cleanup_callback( laoss, temp );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_ordered_singlylinked_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_list_aos_get_by_key( struct lfds700_list_aos_state *laoss,
+ void *key,
+ struct lfds700_list_aos_element **laose )
+{
+ int
+ cr = !0,
+ rv = 1;
+
+ LFDS700_PAL_ASSERT( laoss != NULL );
+ LFDS700_PAL_ASSERT( key != NULL );
+ LFDS700_PAL_ASSERT( laose != NULL );
+
+ while( cr != 0 and LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT(*laoss, *laose) )
+ cr = laoss->key_compare_function( key, (*laose)->key );
+
+ if( *laose == NULL )
+ rv = 0;
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_ordered_singlylinked_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_aos_init_valid_on_current_logical_core( struct lfds700_list_aos_state *laoss,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds700_list_aos_existing_key existing_key,
+ void *user_state )
+{
+ LFDS700_PAL_ASSERT( laoss != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &laoss->start % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &laoss->dummy_element % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &laoss->key_compare_function % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ // TRD : dummy start element - makes code easier when you can always use ->next
+ laoss->start = &laoss->dummy_element;
+
+ laoss->start->next = NULL;
+ laoss->start->value = NULL;
+ laoss->key_compare_function = key_compare_function;
+ laoss->existing_key = existing_key;
+ laoss->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_ordered_singlylinked_internal.h"
+
+
+
+
+
+/****************************************************************************/
+enum lfds700_list_aos_insert_result lfds700_list_aos_insert( struct lfds700_list_aos_state *laoss,
+ struct lfds700_list_aos_element *laose,
+ struct lfds700_list_aos_element **existing_laose,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ enum lfds700_misc_flag
+ finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ int
+ compare_result = 0;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_list_aos_element
+ *volatile laose_temp = NULL,
+ *volatile laose_trailing;
+
+ LFDS700_PAL_ASSERT( laoss != NULL );
+ LFDS700_PAL_ASSERT( laose != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &laose->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &laose->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &laose->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : existing_laose can be NULL
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ /* TRD : imagine a list, sorted small to large
+
+ we arrive at an element
+ we obtain its next pointer
+ we check we are greater than the current element and smaller than the next element
+ this means we have found the correct location to insert
+ we try to CAS ourselves in; in the meantime,
+ someone else has *already* swapped in an element which is smaller than we are
+
+ e.g.
+
+ the list is { 1, 10 } and we are the value 5
+
+ we arrive at 1; we check the next element and see it is 10
+ so we are larger than the current element and smaller than the next
+ we are in the correct location to insert and we go to insert...
+
+ in the meantime, someone else with the value 3 comes along
+ he too finds this is the correct location and inserts before we do
+ the list is now { 1, 3, 10 } and we are trying to insert now after
+ 1 and before 3!
+
+ our insert CAS fails, because the next pointer of 1 has already changed;
+ but we see we are in the wrong location - we need to move forward an
+ element
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ /* TRD : we need to begin with the leading dummy element,
+ because the element to be inserted
+ may be smaller than all elements in the list
+ */
+
+ laose_trailing = laoss->start;
+ laose_temp = laoss->start->next;
+
+ while( finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ if( laose_temp == NULL )
+ compare_result = -1;
+
+ if( laose_temp != NULL )
+ {
+ LFDS700_MISC_BARRIER_LOAD;
+ compare_result = laoss->key_compare_function( laose->key, laose_temp->key );
+ }
+
+ if( compare_result == 0 )
+ {
+ if( existing_laose != NULL )
+ *existing_laose = laose_temp;
+
+ switch( laoss->existing_key )
+ {
+ case LFDS700_LIST_AOS_EXISTING_KEY_OVERWRITE:
+ LFDS700_LIST_AOS_SET_VALUE_IN_ELEMENT( *laose_temp, laose->value );
+ return( LFDS700_LIST_AOS_INSERT_RESULT_SUCCESS_OVERWRITE );
+ break;
+
+ case LFDS700_LIST_AOS_EXISTING_KEY_FAIL:
+ return( LFDS700_LIST_AOS_INSERT_RESULT_FAILURE_EXISTING_KEY );
+ break;
+ }
+
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ }
+
+ if( compare_result < 0 )
+ {
+ laose->next = laose_temp;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &laose_trailing->next, &laose->next, laose, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+
+ if( result == 1 )
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ else
+ // TRD : if we fail to link, someone else has linked and so we need to re-check that our position is still correct
+ laose_temp = laose_trailing->next;
+ }
+
+ if( compare_result > 0 )
+ {
+ // TRD : move trailing along by one element
+ laose_trailing = laose_trailing->next;
+
+ /* TRD : set temp as the element after trailing
+ if the new element we're linking is larger than all elements in the list,
+ laose_temp will now go to NULL and we'll link at the end
+ */
+ laose_temp = laose_trailing->next;
+ }
+ }
+
+ return( LFDS700_LIST_AOS_INSERT_RESULT_SUCCESS );
+}
+
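+/* illustrative sketch (not part of the build) : a minimal, single-threaded usage
+   example for the ordered list insert above, using only names which appear in this
+   library; the key field is set directly purely for illustration (the public header
+   may provide a macro for this), keys here are small integers stored in the pointer,
+   and in real use the state and element instances must satisfy the atomic-isolation
+   alignment asserted by the functions above
+
+   static int example_key_compare( void const *new_key, void const *existing_key )
+   {
+     lfds700_pal_uint_t
+       n = (lfds700_pal_uint_t) new_key,
+       e = (lfds700_pal_uint_t) existing_key;
+
+     return( (n > e) - (n < e) );   // strcmp-style : negative, zero or positive
+   }
+
+   static void example_list_aos_insert( void )
+   {
+     struct lfds700_list_aos_state
+       laoss;
+
+     struct lfds700_list_aos_element
+       laose;
+
+     struct lfds700_misc_prng_state
+       ps;
+
+     lfds700_misc_library_init_valid_on_current_logical_core();
+     lfds700_misc_prng_init( &ps );
+     lfds700_list_aos_init_valid_on_current_logical_core( &laoss, example_key_compare, LFDS700_LIST_AOS_EXISTING_KEY_FAIL, NULL );
+
+     laose.key = (void *) (lfds700_pal_uint_t) 5;   // illustration only - see note above
+     laose.value = NULL;
+
+     lfds700_list_aos_insert( &laoss, &laose, NULL, &ps );
+   }
+*/
+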
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_ordered_singlylinked_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_list_aos_internal_validate( struct lfds700_list_aos_state *laoss, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_list_aos_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_aos_query( struct lfds700_list_aos_state *laoss, enum lfds700_list_aos_query query_type, void *query_input, void *query_output )
+{
+ LFDS700_PAL_ASSERT( laoss != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS700_LIST_AOS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds700_list_aos_element
+ *laose = NULL;
+
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ while( LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT(*laoss, laose) )
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS700_LIST_AOS_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_list_aos_internal_validate( laoss, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_list_aos_internal_validate( struct lfds700_list_aos_state *laoss, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_list_aos_validity )
+{
+ lfds700_pal_uint_t
+ number_elements = 0;
+
+ struct lfds700_list_aos_element
+ *laose_fast,
+ *laose_slow;
+
+ LFDS700_PAL_ASSERT( laoss != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_list_aos_validity != NULL );
+
+ *lfds700_list_aos_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ laose_slow = laose_fast = laoss->start->next;
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the start of the list
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the list)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( laose_slow != NULL )
+ do
+ {
+ laose_slow = laose_slow->next;
+
+ if( laose_fast != NULL )
+ laose_fast = laose_fast->next;
+
+ if( laose_fast != NULL )
+ laose_fast = laose_fast->next;
+ }
+ while( laose_slow != NULL and laose_fast != laose_slow );
+
+ if( laose_fast != NULL and laose_slow != NULL and laose_fast == laose_slow )
+ *lfds700_list_aos_validity = LFDS700_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds700_list_aos_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds700_list_aos_query( laoss, LFDS700_LIST_AOS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds700_list_aos_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds700_list_aos_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
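+/* illustrative sketch (not part of the build) : the loop check above is the classic
+   two-pointer ("tortoise and hare") technique; shown here on a plain singly-linked
+   node, outside of any liblfds structure, for clarity
+
+   struct example_node
+   {
+     struct example_node
+       *next;
+   };
+
+   static int example_list_contains_loop( struct example_node *start )
+   {
+     struct example_node
+       *fast = start,
+       *slow = start;
+
+     while( slow != NULL )
+     {
+       slow = slow->next;        // the slow pointer advances by one element
+
+       if( fast != NULL )
+         fast = fast->next;      // the fast pointer advances by two elements
+       if( fast != NULL )
+         fast = fast->next;
+
+       if( fast != NULL && fast == slow )
+         return( 1 );            // the fast pointer has 'seen' the slow pointer - there is a loop
+     }
+
+     return( 0 );                // both pointers fell off the end - no loop
+   }
+*/
+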
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_asu_cleanup( struct lfds700_list_asu_state *lasus,
+ void (*element_cleanup_callback)(struct lfds700_list_asu_state *lasus, struct lfds700_list_asu_element *lasue) )
+{
+ struct lfds700_list_asu_element
+ *lasue,
+ *temp;
+
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ lasue = LFDS700_LIST_ASU_GET_START( *lasus );
+
+ while( lasue != NULL )
+ {
+ temp = lasue;
+
+ lasue = LFDS700_LIST_ASU_GET_NEXT( *lasue );
+
+ element_cleanup_callback( lasus, temp );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_list_asu_get_by_key( struct lfds700_list_asu_state *lasus,
+ void *key,
+ struct lfds700_list_asu_element **lasue )
+{
+ int
+ cr = !0,
+ rv = 1;
+
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ LFDS700_PAL_ASSERT( key != NULL );
+ LFDS700_PAL_ASSERT( lasue != NULL );
+
+ *lasue = NULL;
+
+ while( cr != 0 and LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*lasus, *lasue) )
+ cr = lasus->key_compare_function( key, (*lasue)->key );
+
+ if( *lasue == NULL )
+ rv = 0;
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_asu_init_valid_on_current_logical_core( struct lfds700_list_asu_state *lasus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *user_state )
+{
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasus->end % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasus->start % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasus->dummy_element % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasus->key_compare_function % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : key_compare_function can be NULL
+ // TRD : user_state can be NULL
+
+ // TRD : dummy start element - makes code easier when you can always use ->next
+ lasus->start = lasus->end = &lasus->dummy_element;
+
+ lasus->start->next = NULL;
+ lasus->start->value = NULL;
+ lasus->key_compare_function = key_compare_function;
+ lasus->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_asu_insert_at_position( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_list_asu_element *lasue_predecessor,
+ enum lfds700_list_asu_position position,
+ struct lfds700_misc_prng_state *ps )
+{
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ LFDS700_PAL_ASSERT( lasue != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : lasue_predecessor asserted in the switch
+ // TRD : position can be any value in its range
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ switch( position )
+ {
+ case LFDS700_LIST_ASU_POSITION_START:
+ lfds700_list_asu_insert_at_start( lasus, lasue, ps );
+ break;
+
+ case LFDS700_LIST_ASU_POSITION_END:
+ lfds700_list_asu_insert_at_end( lasus, lasue, ps );
+ break;
+
+ case LFDS700_LIST_ASU_POSITION_AFTER:
+ lfds700_list_asu_insert_after_element( lasus, lasue, lasue_predecessor, ps );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_asu_insert_at_start( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ LFDS700_PAL_ASSERT( lasue != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lasue->next = lasus->start->next;
+
+ do
+ {
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &lasus->start->next, &lasue->next, lasue, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+ while( result != 1 );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_asu_insert_at_end( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ enum lfds700_misc_flag
+ finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_list_asu_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_SINGLE_POINTER)
+ *compare;
+
+ struct lfds700_list_asu_element
+ *volatile lasue_next,
+ *volatile lasue_end;
+
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ LFDS700_PAL_ASSERT( lasue != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ /* TRD : begin by assuming end is correctly pointing to the final element
+ try to link (comparing for next being NULL)
+ if we fail, move down list till we find last element
+ and retry
+ when successful, update end to ourselves
+
+ note there's a leading dummy element
+ so lasus->end always points to an element
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lasue->next = NULL;
+ lasue_end = lasus->end;
+
+ while( finished_flag == LFDS700_MISC_FLAG_LOWERED )
+ {
+ compare = NULL;
+
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &lasue_end->next, &compare, lasue, LFDS700_MISC_CAS_STRENGTH_STRONG, result, backoff_iteration, ps );
+
+ if( result == 1 )
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ else
+ {
+ lasue_end = compare;
+ lasue_next = LFDS700_LIST_ASU_GET_NEXT( *lasue_end );
+
+ while( lasue_next != NULL )
+ {
+ lasue_end = lasue_next;
+ lasue_next = LFDS700_LIST_ASU_GET_NEXT( *lasue_end );
+ }
+ }
+ }
+
+ lasus->end = lasue;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds700_list_asu_insert_after_element( struct lfds700_list_asu_state *lasus,
+ struct lfds700_list_asu_element *lasue,
+ struct lfds700_list_asu_element *lasue_predecessor,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ LFDS700_PAL_ASSERT( lasue != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->value % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &lasue->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( lasue_predecessor != NULL );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lasue->next = lasue_predecessor->next;
+
+ do
+ {
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( &lasue_predecessor->next, &lasue->next, lasue, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+ while( result != 1 );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_list_addonly_singlylinked_unordered_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_list_asu_internal_validate( struct lfds700_list_asu_state *lasus, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_list_asu_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_list_asu_query( struct lfds700_list_asu_state *lasus, enum lfds700_list_asu_query query_type, void *query_input, void *query_output )
+{
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*lasus, lasue) )
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS700_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_list_asu_internal_validate( lasus, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_list_asu_internal_validate( struct lfds700_list_asu_state *lasus, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_list_asu_validity )
+{
+ lfds700_pal_uint_t
+ number_elements = 0;
+
+ struct lfds700_list_asu_element
+ *lasue_fast,
+ *lasue_slow;
+
+ LFDS700_PAL_ASSERT( lasus != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_list_asu_validity != NULL );
+
+ *lfds700_list_asu_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ lasue_slow = lasue_fast = lasus->start->next;
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the start of the list
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the list)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( lasue_slow != NULL )
+ do
+ {
+ lasue_slow = lasue_slow->next;
+
+ if( lasue_fast != NULL )
+ lasue_fast = lasue_fast->next;
+
+ if( lasue_fast != NULL )
+ lasue_fast = lasue_fast->next;
+ }
+ while( lasue_slow != NULL and lasue_fast != lasue_slow );
+
+ if( lasue_fast != NULL and lasue_slow != NULL and lasue_fast == lasue_slow )
+ *lfds700_list_asu_validity = LFDS700_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds700_list_asu_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds700_list_asu_query( lasus, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds700_list_asu_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds700_list_asu_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_misc_library_cleanup( void )
+{
+ // TRD : we do nuuuuuuthin'
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+struct lfds700_misc_globals
+ lfds700_misc_globals;
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_misc_library_init_valid_on_current_logical_core()
+{
+ /* TRD : the PRNG arrangement is that each thread has its own state, for a maximum-speed PRNG, where output
+ quality is a secondary consideration to performance
+
+ on 64-bit platforms this is xorshift64*, on 32-bit platforms, an unadorned xorshift32
+
+ the seed for each thread however comes from a single, global, maximum-quality PRNG, where quality of
+ output is the primary consideration
+
+ for this, I'm using a xorshift1024*
+
+ since the generation from this global PRNG state is not thread safe, but is still quick in
+ thread start-up terms, I run a little spin-lock around it
+
+ regarding the seed for this high quality PRNG; it is customary to use time(), but this has a number of
+ drawbacks;
+
+ 1. liblfds would depend on time() (currently it does not depend on a hosted implementation of standard library)
+ 2. the output from time may only be 32 bit, and even when it isn't, the top 32 bits are currently all zero...
+ 3. many threads can begin in the same second; I'd need to add in their thread number,
+ which means I'd need to *get* their thread number...
+
+ as such, I've decided to use a *fixed* 64-bit seed for the high-quality PRNG; this seed is run
+ through the MurmurHash3 avalanche phase to generate successive 64-bit values, which populate
+ the 1024-bit state of xorshift1024*
+
+ if you have access to a high-frequency clock (often 64-bit), you can use this for the seed
+ (don't use it for the per-thread PRNG, unless you know the clock can be read without a context switch)
+
+ murmurhash3 code from here; http://xorshift.di.unimi.it/murmurhash3.c
+ */
+
+ lfds700_misc_prng_internal_big_slow_high_quality_init( LFDS700_MISC_PRNG_SEED );
+
+ lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_cas = EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_INCS_FOR_CAS;
+ lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas = EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_INCS_FOR_DWCAS;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
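+/* illustrative sketch (not part of the build) : the per-thread "maximum-speed" PRNG
+   referred to above is implemented elsewhere in the library; for reference, one step
+   of a plain xorshift64* generator (shift amounts and multiplier from Vigna's
+   public-domain reference code) looks like this - the library's own generator may
+   differ in detail
+
+   static int long long unsigned example_xorshift64star( int long long unsigned *state )
+   {
+     int long long unsigned
+       x = *state;               // the state must be seeded with a non-zero value
+
+     x ^= x >> 12;
+     x ^= x << 25;
+     x ^= x >> 27;
+
+     *state = x;
+
+     return( x * 0x2545F4914F6CDD1DULL );
+   }
+*/
+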
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** defines *****/
+#define EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_INCS_FOR_CAS 8
+#define EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_INCS_FOR_DWCAS 16
+
+/***** private prototypes *****/
+void lfds700_misc_prng_internal_big_slow_high_quality_init( int long long unsigned seed );
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_misc_internal.h"
+
+/***** defines *****/
+#define LFDS700_PRNG_STATE_SIZE 16
+
+/***** struct *****/
+struct lfds700_misc_prng_big_slow_high_quality_state
+{
+ lfds700_pal_atom_t LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ xorshift1024star_spinlock;
+
+ // TRD : must be a 32 bit signed int
+ int
+ xorshift1024star_index;
+
+ int long long unsigned
+ xorshift1024star_state[LFDS700_PRNG_STATE_SIZE];
+};
+
+/***** locals *****/
+struct lfds700_misc_prng_big_slow_high_quality_state
+ pbshqs;
+
+/***** private prototypes *****/
+static void lfds700_misc_prng_internal_hash_murmurhash3( int long long unsigned *murmurhash3_state );
+static void lfds700_misc_prng_internal_big_slow_high_quality_generate( struct lfds700_misc_prng_big_slow_high_quality_state *ps, lfds700_pal_uint_t *random_value );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_misc_prng_init( struct lfds700_misc_prng_state *ps )
+{
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ /* TRD : we use the big, slow, high quality PRNG to generate the initial value
+ for the small, fast, low quality PRNG, which is used in exponential backoff
+
+ we need the load barrier to catch any changes to the backoff periods
+ */
+
+ lfds700_misc_prng_internal_big_slow_high_quality_generate( &pbshqs, &ps->prng_state );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ ps->local_copy_of_global_exponential_backoff_timeslot_length_in_loop_iterations_for_cas = lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_cas;
+ ps->local_copy_of_global_exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas = lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds700_misc_prng_internal_big_slow_high_quality_init( int long long unsigned seed )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ LFDS700_PAL_ASSERT( seed != 0 ); // TRD : a 0 seed causes all zeros in the entropy state, so is forbidden
+
+ pbshqs.xorshift1024star_spinlock = LFDS700_MISC_FLAG_LOWERED;
+
+ for( loop = 0 ; loop < LFDS700_PRNG_STATE_SIZE ; loop++ )
+ {
+ lfds700_misc_prng_internal_hash_murmurhash3( &seed );
+ pbshqs.xorshift1024star_state[loop] = seed;
+ }
+
+ pbshqs.xorshift1024star_index = 0;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_misc_prng_internal_hash_murmurhash3( int long long unsigned *murmurhash3_state )
+{
+ LFDS700_PAL_ASSERT( murmurhash3_state != NULL );
+
+ *murmurhash3_state ^= *murmurhash3_state >> 33;
+ *murmurhash3_state *= 0xff51afd7ed558ccdULL;
+ *murmurhash3_state ^= *murmurhash3_state >> 33;
+ *murmurhash3_state *= 0xc4ceb9fe1a85ec53ULL;
+ *murmurhash3_state ^= *murmurhash3_state >> 33;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_misc_prng_internal_big_slow_high_quality_generate( struct lfds700_misc_prng_big_slow_high_quality_state *ps, lfds700_pal_uint_t *random_value )
+{
+ char unsigned
+ result;
+
+ int long long unsigned
+ xs_temp_one,
+ xs_temp_two;
+
+ lfds700_pal_atom_t
+ compare = LFDS700_MISC_FLAG_LOWERED,
+ exchange = LFDS700_MISC_FLAG_LOWERED;
+
+ LFDS700_PAL_ASSERT( ps != NULL );
+ LFDS700_PAL_ASSERT( random_value != NULL );
+
+ // TRD : this is single-threaded code, on a per-state basis
+ do
+ {
+ compare = LFDS700_MISC_FLAG_LOWERED;
+ LFDS700_PAL_ATOMIC_CAS( &ps->xorshift1024star_spinlock, &compare, (lfds700_pal_atom_t) LFDS700_MISC_FLAG_RAISED, LFDS700_MISC_CAS_STRENGTH_STRONG, result );
+ }
+ while( result == 0 );
+
+ // TRD : xorshift1024* code from here; http://xorshift.di.unimi.it/xorshift1024star.c
+
+ xs_temp_one = ps->xorshift1024star_state[ ps->xorshift1024star_index ];
+ ps->xorshift1024star_index = ( ps->xorshift1024star_index + 1 ) & 15;
+ xs_temp_two = ps->xorshift1024star_state[ ps->xorshift1024star_index ];
+
+ xs_temp_two ^= xs_temp_two << 31;
+ xs_temp_two ^= xs_temp_two >> 11;
+ xs_temp_one ^= xs_temp_one >> 30;
+
+ ps->xorshift1024star_state[ ps->xorshift1024star_index ] = xs_temp_one ^ xs_temp_two;
+
+ *random_value = (lfds700_pal_uint_t) ( ps->xorshift1024star_state[ ps->xorshift1024star_index ] * 1181783497276652981LL );
+
+ LFDS700_PAL_ATOMIC_EXCHANGE( &ps->xorshift1024star_spinlock, &exchange );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_misc_query( enum lfds700_misc_query query_type, void *query_input, void *query_output )
+{
+ // TRD : query type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+ // TRD : query_output can be NULL in some cases
+
+ switch( query_type )
+ {
+ case LFDS700_MISC_QUERY_GET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_CAS:
+ *(lfds700_pal_atom_t *) query_output = lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_cas;
+ break;
+
+ case LFDS700_MISC_QUERY_SET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_CAS:
+ LFDS700_PAL_ATOMIC_EXCHANGE( &lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_cas, (lfds700_pal_atom_t *) query_input );
+ break;
+
+ case LFDS700_MISC_QUERY_GET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_DWCAS:
+ *(lfds700_pal_atom_t *) query_output = lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas;
+ break;
+
+ case LFDS700_MISC_QUERY_SET_EXPONENTIAL_BACKOFF_TIMESLOT_LENGTH_IN_LOOP_ITERATIONS_FOR_DWCAS:
+ LFDS700_PAL_ATOMIC_EXCHANGE( &lfds700_misc_globals.exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas, (lfds700_pal_atom_t *) query_input );
+ break;
+
+ case LFDS700_MISC_QUERY_GET_BUILD_AND_VERSION_STRING:
+ {
+ char static const
+ * const build_and_version_string = "liblfds " LFDS700_MISC_VERSION_STRING " (" BUILD_TYPE_STRING ", " LFDS700_PAL_OS_STRING ", " MODE_TYPE_STRING ", " LFDS700_PAL_PROCESSOR_STRING ", " LFDS700_PAL_COMPILER_STRING ")";
+
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(char const **) query_output = build_and_version_string;
+ }
+ break;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_cleanup( struct lfds700_queue_state *qs,
+ void (*element_cleanup_callback)(struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag) )
+{
+ struct lfds700_queue_element
+ *qe;
+
+ void
+ *value;
+
+ LFDS700_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ {
+ while( qs->dequeue[POINTER] != qs->enqueue[POINTER] )
+ {
+ // TRD : trailing dummy element, so the first real value is in the next element
+ value = qs->dequeue[POINTER]->next[POINTER]->value;
+
+ // TRD : user is given back *an* element, but not the one his user data was in
+ qe = qs->dequeue[POINTER];
+
+ // TRD : remove the element from queue
+ qs->dequeue[POINTER] = qs->dequeue[POINTER]->next[POINTER];
+
+ // TRD : write value into the qe we're going to give the user
+ qe->value = value;
+
+ element_cleanup_callback( qs, qe, LFDS700_MISC_FLAG_LOWERED );
+ }
+
+ // TRD : and now the final element
+ element_cleanup_callback( qs, (struct lfds700_queue_element *) qs->dequeue[POINTER], LFDS700_MISC_FLAG_RAISED );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_queue_dequeue( struct lfds700_queue_state *qs,
+ struct lfds700_queue_element **qe,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result = 0,
+ unwanted_result;
+
+ enum lfds700_queue_queue_state
+ state = LFDS700_QUEUE_QUEUE_STATE_UNKNOWN;
+
+ int
+ rv = 1,
+ finished_flag = LFDS700_MISC_FLAG_LOWERED;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_queue_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ *dequeue[PAC_SIZE],
+ *enqueue[PAC_SIZE],
+ *next[PAC_SIZE];
+
+ void
+ *key = NULL,
+ *value = NULL;
+
+ LFDS700_PAL_ASSERT( qs != NULL );
+ LFDS700_PAL_ASSERT( qe != NULL );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ do
+ {
+ dequeue[COUNTER] = qs->dequeue[COUNTER];
+ dequeue[POINTER] = qs->dequeue[POINTER];
+
+ enqueue[COUNTER] = qs->enqueue[COUNTER];
+ enqueue[POINTER] = qs->enqueue[POINTER];
+
+ next[COUNTER] = qs->dequeue[POINTER]->next[COUNTER];
+ next[POINTER] = qs->dequeue[POINTER]->next[POINTER];
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( dequeue[COUNTER] == qs->dequeue[COUNTER] and dequeue[POINTER] == qs->dequeue[POINTER] )
+ {
+ if( enqueue[POINTER] == dequeue[POINTER] and next[POINTER] == NULL )
+ state = LFDS700_QUEUE_QUEUE_STATE_EMPTY;
+
+ if( enqueue[POINTER] == dequeue[POINTER] and next[POINTER] != NULL )
+ state = LFDS700_QUEUE_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE;
+
+ if( enqueue[POINTER] != dequeue[POINTER] )
+ state = LFDS700_QUEUE_QUEUE_STATE_ATTEMPT_DEQUEUE;
+
+ switch( state )
+ {
+ case LFDS700_QUEUE_QUEUE_STATE_UNKNOWN:
+ // TRD : eliminates compiler warning
+ break;
+
+ case LFDS700_QUEUE_QUEUE_STATE_EMPTY:
+ rv = 0;
+ *qe = NULL;
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ break;
+
+ case LFDS700_QUEUE_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE:
+ next[COUNTER] = enqueue[COUNTER] + 1;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_DWCAS( qs->enqueue, enqueue, next, LFDS700_MISC_CAS_STRENGTH_WEAK, unwanted_result );
+ break;
+
+ case LFDS700_QUEUE_QUEUE_STATE_ATTEMPT_DEQUEUE:
+ key = next[POINTER]->key;
+ value = next[POINTER]->value;
+
+ next[COUNTER] = dequeue[COUNTER] + 1;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( qs->dequeue, dequeue, next, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+
+ if( result == 1 )
+ finished_flag = LFDS700_MISC_FLAG_RAISED;
+ break;
+ }
+ }
+ }
+ while( finished_flag == LFDS700_MISC_FLAG_LOWERED );
+
+ if( result == 1 )
+ {
+ *qe = dequeue[POINTER];
+ (*qe)->key = key;
+ (*qe)->value = value;
+ }
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_enqueue( struct lfds700_queue_state *qs,
+ struct lfds700_queue_element *qe,
+ struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result = 0,
+ unwanted_result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_queue_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ *volatile enqueue[PAC_SIZE],
+ *new_enqueue[PAC_SIZE],
+ *volatile next[PAC_SIZE];
+
+ LFDS700_PAL_ASSERT( qs != NULL );
+ LFDS700_PAL_ASSERT( qe != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) qe->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &qe->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ qe->next[COUNTER] = (struct lfds700_queue_element *) LFDS700_MISC_PRNG_GENERATE( ps );
+ qe->next[POINTER] = NULL;
+
+ new_enqueue[POINTER] = qe;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ do
+ {
+ enqueue[COUNTER] = qs->enqueue[COUNTER];
+ enqueue[POINTER] = qs->enqueue[POINTER];
+
+ next[COUNTER] = qs->enqueue[POINTER]->next[COUNTER];
+ next[POINTER] = qs->enqueue[POINTER]->next[POINTER];
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( qs->enqueue[COUNTER] == enqueue[COUNTER] and qs->enqueue[POINTER] == enqueue[POINTER] )
+ {
+ if( next[POINTER] == NULL )
+ {
+ new_enqueue[COUNTER] = next[COUNTER] + 1;
+ LFDS700_MISC_BARRIER_STORE;
+ LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( enqueue[POINTER]->next, next, new_enqueue, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+ else
+ {
+ next[COUNTER] = enqueue[COUNTER] + 1;
+ LFDS700_MISC_BARRIER_STORE;
+ // TRD : strictly, this is a weak CAS, but we do an extra iteration of the main loop on a fake failure, so we set it to be strong
+ LFDS700_PAL_ATOMIC_DWCAS( qs->enqueue, enqueue, next, LFDS700_MISC_CAS_STRENGTH_STRONG, unwanted_result );
+ }
+ }
+ }
+ while( result != 1 );
+
+ new_enqueue[COUNTER] = enqueue[COUNTER] + 1;
+ LFDS700_MISC_BARRIER_STORE;
+ // TRD : move enqueue along; only a weak CAS as the dequeue will solve this if it's out of place
+ LFDS700_PAL_ATOMIC_DWCAS( qs->enqueue, enqueue, new_enqueue, LFDS700_MISC_CAS_STRENGTH_WEAK, unwanted_result );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_init_valid_on_current_logical_core( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe_dummy, struct lfds700_misc_prng_state *ps, void *user_state )
+{
+ LFDS700_PAL_ASSERT( qs != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &qs->enqueue % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &qs->dequeue % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &qs->user_state % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( qe_dummy != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) qe_dummy->next % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &qe_dummy->key % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( ps != NULL );
+ // TRD : user_state can be NULL
+
+ /* TRD : qe_dummy is a dummy element, needed for init
+ the qs->enqueue and qs->dequeue counters do not need to be initialized
+ but it does no harm to do so, and stops a valgrind complaint
+ */
+
+ qs->enqueue[POINTER] = qe_dummy;
+ qs->enqueue[COUNTER] = (struct lfds700_queue_element *) 0;
+ qs->dequeue[POINTER] = qe_dummy;
+ qs->dequeue[COUNTER] = (struct lfds700_queue_element *) 0;
+
+ qe_dummy->next[POINTER] = NULL;
+ qe_dummy->next[COUNTER] = (struct lfds700_queue_element *) LFDS700_MISC_PRNG_GENERATE( ps );
+ qe_dummy->value = NULL;
+
+ qs->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
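+/* illustrative sketch (not part of the build) : minimal, single-threaded usage of the
+   queue, using only names which appear in this library; note the dummy element handed
+   to init, and that in real use the state and element instances must satisfy the
+   atomic-isolation alignment asserted above; assumes the library itself has already
+   been initialised on this logical core
+
+   static void example_queue_usage( void )
+   {
+     struct lfds700_misc_prng_state
+       ps;
+
+     struct lfds700_queue_element
+       qe_dummy,
+       qe,
+       *qe_out;
+
+     struct lfds700_queue_state
+       qs;
+
+     void
+       *value;
+
+     lfds700_misc_prng_init( &ps );
+     lfds700_queue_init_valid_on_current_logical_core( &qs, &qe_dummy, &ps, NULL );
+
+     LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( qe, (void *) 0x1234 );
+     lfds700_queue_enqueue( &qs, &qe, &ps );
+
+     if( lfds700_queue_dequeue( &qs, &qe_out, &ps ) == 1 )
+       value = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe_out );   // value is now (void *) 0x1234
+   }
+*/
+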
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** enums *****/
+enum lfds700_queue_queue_state
+{
+ LFDS700_QUEUE_QUEUE_STATE_UNKNOWN,
+ LFDS700_QUEUE_QUEUE_STATE_EMPTY,
+ LFDS700_QUEUE_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE,
+ LFDS700_QUEUE_QUEUE_STATE_ATTEMPT_DEQUEUE
+};
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_queue_internal_validate( struct lfds700_queue_state *qs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_queue_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_query( struct lfds700_queue_state *qs, enum lfds700_queue_query query_type, void *query_input, void *query_output )
+{
+ struct lfds700_queue_element
+ *qe;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ LFDS700_PAL_ASSERT( qs != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS700_QUEUE_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ qe = (struct lfds700_queue_element *) qs->dequeue[POINTER];
+
+ while( qe != NULL )
+ {
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ qe = (struct lfds700_queue_element *) qe->next[POINTER];
+ }
+
+ // TRD : remember there is a dummy element in the queue
+ ( *(lfds700_pal_uint_t *) query_output )--;
+ break;
+
+ case LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_queue_internal_validate( qs, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_queue_internal_validate( struct lfds700_queue_state *qs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_queue_validity )
+{
+ lfds700_pal_uint_t
+ number_elements = 0;
+
+ struct lfds700_queue_element
+ *qe_fast,
+ *qe_slow;
+
+ LFDS700_PAL_ASSERT( qs != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_queue_validity != NULL );
+
+ *lfds700_queue_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ qe_slow = qe_fast = (struct lfds700_queue_element *) qs->dequeue[POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the dequeue end of the queue
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the queue)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( qe_slow != NULL )
+ do
+ {
+ qe_slow = qe_slow->next[POINTER];
+
+ if( qe_fast != NULL )
+ qe_fast = qe_fast->next[POINTER];
+
+ if( qe_fast != NULL )
+ qe_fast = qe_fast->next[POINTER];
+ }
+ while( qe_slow != NULL and qe_fast != qe_slow );
+
+ if( qe_fast != NULL and qe_slow != NULL and qe_fast == qe_slow )
+ *lfds700_queue_validity = LFDS700_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds700_queue_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds700_queue_query( qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds700_queue_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds700_queue_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_bounded_singleconsumer_singleproducer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_bss_cleanup( struct lfds700_queue_bss_state *qbsss,
+ void (*element_cleanup_callback)(struct lfds700_queue_bss_state *qbsss, void *key, void *value) )
+{
+ int long long unsigned
+ loop;
+
+ struct lfds700_queue_bss_element
+ *qbsse;
+
+ LFDS700_PAL_ASSERT( qbsss != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ for( loop = qbsss->read_index ; loop < qbsss->read_index + qbsss->number_elements ; loop++ )
+ {
+ qbsse = qbsss->element_array + (loop % qbsss->number_elements);
+ element_cleanup_callback( qbsss, qbsse->key, qbsse->value );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_bounded_singleconsumer_singleproducer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_queue_bss_dequeue( struct lfds700_queue_bss_state *qbsss, void **key, void **value )
+{
+ int
+ rv = 0;
+
+ struct lfds700_queue_bss_element
+ *qbsse;
+
+ LFDS700_PAL_ASSERT( qbsss != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( qbsss->read_index != qbsss->write_index )
+ {
+ qbsse = qbsss->element_array + qbsss->read_index;
+
+ if( key != NULL )
+ *key = qbsse->key;
+
+ if( value != NULL )
+ *value = qbsse->value;
+
+ qbsss->read_index = (qbsss->read_index + 1) & qbsss->mask;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ rv = 1;
+ }
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_bounded_singleconsumer_singleproducer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_queue_bss_enqueue( struct lfds700_queue_bss_state *qbsss, void *key, void *value )
+{
+ int
+ rv = 0;
+
+ struct lfds700_queue_bss_element
+ *qbsse;
+
+ LFDS700_PAL_ASSERT( qbsss != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( ( (qbsss->write_index+1) & qbsss->mask ) != qbsss->read_index )
+ {
+ qbsse = qbsss->element_array + qbsss->write_index;
+
+ qbsse->key = key;
+ qbsse->value = value;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ qbsss->write_index = (qbsss->write_index + 1) & qbsss->mask;
+
+ rv = 1;
+ }
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_bounded_singleconsumer_singleproducer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_bss_init_valid_on_current_logical_core( struct lfds700_queue_bss_state *qbsss,
+ struct lfds700_queue_bss_element *element_array,
+ lfds700_pal_uint_t number_elements,
+ void *user_state )
+{
+ LFDS700_PAL_ASSERT( qbsss != NULL );
+ LFDS700_PAL_ASSERT( element_array != NULL );
+ LFDS700_PAL_ASSERT( number_elements >= 2 );
+ LFDS700_PAL_ASSERT( ( number_elements & (number_elements-1) ) == 0 ); // TRD : number_elements must be a positive integer power of 2
+ // TRD : user_state can be NULL
+
+ /* TRD : the use of mask and the restriction on a power of two
+ upon the number of elements bears some remark
+
+ in this queue, there are a fixed number of elements
+ we have a read index and a write index
+ when we write, and there is space to write, we increment the write index
+ (if no space to write, we just return)
+ when we read, and there are elements to be read, we increment the read index after reading
+ (if no elements to read, we just return)
+ the problem is - how do we handle wrap around?
+ e.g. when I go to write, but my write index is now equal to the number of elements
+ the usual solution is to take the write index modulo the number of elements
+ problem is, modulus is slow
+ there is a better way
+ first, we restrict the number of elements to be a power of two
+ so imagine we set the number of elements to be 2^15 (the same argument works for any power of two)
+ this gives us a bit pattern of 1000 0000 0000 0000
+ now (just roll with this for a bit) subtract one from this
+ this gives us a mask (on a two's complement machine)
+ 0111 1111 1111 1111
+ so what we do now, when we increment an index (think of the write index as the example)
+ is bitwise and it with the mask
+ now think about what happens
+ all the indexes below 2^15 are unchanged - their top bit is never set, and we and with all the other bits
+ but when we finally hit 2^15 and need to roll over... bingo!
+ the and drops the top bit (which we finally have) and we get the value 0!
+ this is exactly what we want
+ bitwise and is much faster than modulus
+ (a short worked sketch of this arithmetic follows this function)
+ */
+
+ qbsss->number_elements = number_elements;
+ qbsss->mask = qbsss->number_elements - 1;
+ qbsss->read_index = 0;
+ qbsss->write_index = 0;
+ qbsss->element_array = element_array;
+ qbsss->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
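+/* illustrative sketch (not part of the build) : the worked example promised above,
+   plus minimal single-threaded usage, using only names which appear in this library
+
+   with number_elements = 8, the mask is 7 (binary 0111), so
+
+     (6 + 1) & 7  ==  7     normal increment - the and leaves the index unchanged
+     (7 + 1) & 7  ==  0     8 is binary 1000 - the and drops the top bit and wraps to 0
+
+   static void example_queue_bss_usage( void )
+   {
+     struct lfds700_queue_bss_element
+       element_array[8];          // must be a power of two, and at least 2
+
+     struct lfds700_queue_bss_state
+       qbsss;
+
+     void
+       *value;
+
+     lfds700_queue_bss_init_valid_on_current_logical_core( &qbsss, element_array, 8, NULL );
+
+     lfds700_queue_bss_enqueue( &qbsss, NULL, (void *) 0x1234 );
+
+     if( lfds700_queue_bss_dequeue( &qbsss, NULL, &value ) == 1 )
+     {
+       // value is now (void *) 0x1234
+     }
+   }
+*/
+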
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_queue_bounded_singleconsumer_singleproducer_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_queue_bss_internal_validate( struct lfds700_queue_bss_state *qbsss, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_queue_bss_query( struct lfds700_queue_bss_state *qbsss, enum lfds700_queue_bss_query query_type, void *query_input, void *query_output )
+{
+ LFDS700_PAL_ASSERT( qbsss != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS700_QUEUE_BSS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ *(lfds700_pal_uint_t *) query_output = +( qbsss->write_index - qbsss->read_index );
+ if( qbsss->read_index > qbsss->write_index )
+ *(lfds700_pal_uint_t *) query_output = qbsss->number_elements - *(lfds700_pal_uint_t *) query_output;
+ break;
+
+ case LFDS700_QUEUE_BSS_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_queue_bss_internal_validate( qbsss, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_queue_bss_internal_validate( struct lfds700_queue_bss_state *qbsss, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_validity )
+{
+ LFDS700_PAL_ASSERT( qbsss != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_validity != NULL );
+
+ *lfds700_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ if( vi != NULL )
+ {
+ lfds700_pal_uint_t
+ number_elements;
+
+ lfds700_queue_bss_query( qbsss, LFDS700_QUEUE_BSS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds700_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds700_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_ringbuffer_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_ringbuffer_internal_queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag );
+static void lfds700_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds700_freelist_state *fs, struct lfds700_freelist_element *fe );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_ringbuffer_cleanup( struct lfds700_ringbuffer_state *rs,
+ void (*element_cleanup_callback)(struct lfds700_ringbuffer_state *rs, void *key, void *value, enum lfds700_misc_flag unread_flag) )
+{
+ LFDS700_PAL_ASSERT( rs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ rs->element_cleanup_callback = element_cleanup_callback;
+ lfds700_queue_cleanup( &rs->qs, lfds700_ringbuffer_internal_queue_element_cleanup_callback );
+ lfds700_freelist_cleanup( &rs->fs, lfds700_ringbuffer_internal_freelist_element_cleanup_callback );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void lfds700_ringbuffer_internal_queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag )
+{
+ struct lfds700_ringbuffer_element
+ *re;
+
+ struct lfds700_ringbuffer_state
+ *rs;
+
+ LFDS700_PAL_ASSERT( qs != NULL );
+ LFDS700_PAL_ASSERT( qe != NULL );
+ // TRD : dummy_element can be any value in its range
+
+ rs = (struct lfds700_ringbuffer_state *) LFDS700_QUEUE_GET_USER_STATE_FROM_STATE( *qs );
+ re = (struct lfds700_ringbuffer_element *) LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( dummy_element_flag == LFDS700_MISC_FLAG_LOWERED )
+ rs->element_cleanup_callback( rs, re->key, re->value, LFDS700_MISC_FLAG_RAISED );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void lfds700_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds700_freelist_state *fs, struct lfds700_freelist_element *fe )
+{
+ struct lfds700_ringbuffer_element
+ *re;
+
+ struct lfds700_ringbuffer_state
+ *rs;
+
+ LFDS700_PAL_ASSERT( fs != NULL );
+ LFDS700_PAL_ASSERT( fe != NULL );
+
+ rs = (struct lfds700_ringbuffer_state *) LFDS700_FREELIST_GET_USER_STATE_FROM_STATE( *fs );
+ re = (struct lfds700_ringbuffer_element *) LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+
+ rs->element_cleanup_callback( rs, re->key, re->value, LFDS700_MISC_FLAG_LOWERED );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_ringbuffer_init_valid_on_current_logical_core( struct lfds700_ringbuffer_state *rs,
+ struct lfds700_ringbuffer_element *re_array_inc_dummy,
+ lfds700_pal_uint_t number_elements,
+ struct lfds700_misc_prng_state *ps,
+ void *user_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ LFDS700_PAL_ASSERT( rs != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &rs->fs % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &rs->qs % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( re_array_inc_dummy != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &re_array_inc_dummy[0].fe % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &re_array_inc_dummy[0].qe % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( number_elements >= 2 );
+ LFDS700_PAL_ASSERT( ps != NULL );
+ // TRD : user_state can be NULL
+
+ rs->user_state = user_state;
+
+ re_array_inc_dummy[0].qe_use = &re_array_inc_dummy[0].qe;
+
+ lfds700_freelist_init_valid_on_current_logical_core( &rs->fs, rs );
+ lfds700_queue_init_valid_on_current_logical_core( &rs->qs, &re_array_inc_dummy[0].qe, ps, rs );
+
+ for( loop = 1 ; loop < number_elements ; loop++ )
+ {
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &re_array_inc_dummy[loop].fe % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &re_array_inc_dummy[loop].qe % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+
+ re_array_inc_dummy[loop].qe_use = &re_array_inc_dummy[loop].qe;
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( re_array_inc_dummy[loop].fe, &re_array_inc_dummy[loop] );
+ lfds700_freelist_push( &rs->fs, &re_array_inc_dummy[loop].fe, ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_ringbuffer_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_ringbuffer_internal_validate( struct lfds700_ringbuffer_state *rs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_queue_validity, enum lfds700_misc_validity *lfds700_freelist_validity );
+
+
+
+/****************************************************************************/
+void lfds700_ringbuffer_query( struct lfds700_ringbuffer_state *rs, enum lfds700_ringbuffer_query query_type, void *query_input, void *query_output )
+{
+ LFDS700_PAL_ASSERT( rs != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_queue_query( &rs->qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_GET_COUNT, NULL, query_output );
+ break;
+
+ case LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_ringbuffer_internal_validate( rs, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output, ((enum lfds700_misc_validity *) query_output)+1 );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_ringbuffer_internal_validate( struct lfds700_ringbuffer_state *rs, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_queue_validity, enum lfds700_misc_validity *lfds700_freelist_validity )
+{
+ LFDS700_PAL_ASSERT( rs != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_queue_validity != NULL );
+ LFDS700_PAL_ASSERT( lfds700_freelist_validity != NULL );
+
+ if( vi == NULL )
+ {
+ lfds700_queue_query( &rs->qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, NULL, lfds700_queue_validity );
+ lfds700_freelist_query( &rs->fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, NULL, lfds700_freelist_validity );
+ }
+
+ if( vi != NULL )
+ {
+ struct lfds700_misc_validation_info
+ freelist_vi,
+ queue_vi;
+
+ queue_vi.min_elements = 0;
+ freelist_vi.min_elements = 0;
+ queue_vi.max_elements = vi->max_elements;
+ freelist_vi.max_elements = vi->max_elements;
+
+ lfds700_queue_query( &rs->qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &queue_vi, lfds700_queue_validity );
+ lfds700_freelist_query( &rs->fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &freelist_vi, lfds700_freelist_validity );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_ringbuffer_read( struct lfds700_ringbuffer_state *rs,
+ void **key,
+ void **value,
+ struct lfds700_misc_prng_state *ps )
+{
+ int
+ rv;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_ringbuffer_element
+ *re;
+
+ LFDS700_PAL_ASSERT( rs != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ rv = lfds700_queue_dequeue( &rs->qs, &qe, ps );
+
+ if( rv == 1 )
+ {
+ re = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+ re->qe_use = (struct lfds700_queue_element *) qe;
+ if( key != NULL )
+ *key = re->key;
+ if( value != NULL )
+ *value = re->value;
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( re->fe, re );
+ lfds700_freelist_push( &rs->fs, &re->fe, ps );
+ }
+
+ return( rv );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_ringbuffer_write( struct lfds700_ringbuffer_state *rs,
+ void *key,
+ void *value,
+ enum lfds700_misc_flag *overwrite_occurred_flag,
+ void **overwritten_key,
+ void **overwritten_value,
+ struct lfds700_misc_prng_state *ps )
+{
+ int
+ rv = 0;
+
+ struct lfds700_freelist_element
+ *fe;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_ringbuffer_element
+ *re = NULL;
+
+ LFDS700_PAL_ASSERT( rs != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+ // TRD : overwrite_occurred_flag can be NULL
+ // TRD : overwritten_key can be NULL
+ // TRD : overwritten_value can be NULL
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ if( overwrite_occurred_flag != NULL )
+ *overwrite_occurred_flag = LFDS700_MISC_FLAG_LOWERED;
+
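+  /* a sketch of the write strategy implemented by the loop below : first try to pop a
+     free ringbuffer element from the freelist; if the freelist is empty, the ringbuffer
+     is full, so dequeue the oldest element from the queue and reuse it, reporting its
+     key/value to the caller as the overwritten pair; loop until one of the two succeeds
+  */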
+ do
+ {
+ rv = lfds700_freelist_pop( &rs->fs, &fe, ps );
+
+ if( rv == 1 )
+ re = LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+
+ if( rv == 0 )
+ {
+ // TRD : the queue can return empty as well - remember, we're lock-free; anything could have happened since the previous instruction
+ rv = lfds700_queue_dequeue( &rs->qs, &qe, ps );
+
+ if( rv == 1 )
+ {
+ re = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+ re->qe_use = (struct lfds700_queue_element *) qe;
+
+ if( overwrite_occurred_flag != NULL )
+ *overwrite_occurred_flag = LFDS700_MISC_FLAG_RAISED;
+
+ if( overwritten_key != NULL )
+ *overwritten_key = re->key;
+
+ if( overwritten_value != NULL )
+ *overwritten_value = re->value;
+ }
+ }
+ }
+ while( rv == 0 );
+
+ re->key = key;
+ re->value = value;
+
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( *re->qe_use, re );
+ lfds700_queue_enqueue( &rs->qs, re->qe_use, ps );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_stack_cleanup( struct lfds700_stack_state *ss,
+ void (*element_cleanup_callback)(struct lfds700_stack_state *ss, struct lfds700_stack_element *se) )
+{
+ struct lfds700_stack_element
+ *se,
+ *se_temp;
+
+ LFDS700_PAL_ASSERT( ss != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ {
+ se = ss->top[POINTER];
+
+ while( se != NULL )
+ {
+ se_temp = se;
+ se = se->next;
+
+ element_cleanup_callback( ss, se_temp );
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_stack_init_valid_on_current_logical_core( struct lfds700_stack_state *ss, void *user_state )
+{
+ LFDS700_PAL_ASSERT( ss != NULL );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) ss->top % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS700_PAL_ASSERT( (lfds700_pal_uint_t) &ss->user_state % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : user_state can be NULL
+
+ ss->top[POINTER] = NULL;
+ ss->top[COUNTER] = 0;
+
+ ss->user_state = user_state;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds700_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds700_stack_pop( struct lfds700_stack_state *ss, struct lfds700_stack_element **se, struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_stack_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS700_PAL_ASSERT( ss != NULL );
+ LFDS700_PAL_ASSERT( se != NULL );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ LFDS700_PAL_BARRIER_PROCESSOR_LOAD;
+
+ original_top[COUNTER] = ss->top[COUNTER];
+ original_top[POINTER] = ss->top[POINTER];
+
+ do
+ {
+ if( original_top[POINTER] == NULL )
+ {
+ *se = NULL;
+ return( 0 );
+ }
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ new_top[POINTER] = original_top[POINTER]->next;
+
+ LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( &ss->top, original_top, new_top, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+
+ if( result != 1 )
+ LFDS700_PAL_BARRIER_PROCESSOR_LOAD;
+ }
+ while( result != 1 );
+
+ *se = original_top[POINTER];
+
+ return( 1 );
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds700_stack_push( struct lfds700_stack_state *ss, struct lfds700_stack_element *se, struct lfds700_misc_prng_state *ps )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ backoff_iteration = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE;
+
+ struct lfds700_stack_element LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS700_PAL_ASSERT( ss != NULL );
+ LFDS700_PAL_ASSERT( se != NULL );
+ LFDS700_PAL_ASSERT( ps != NULL );
+
+ new_top[POINTER] = se;
+
+ original_top[COUNTER] = ss->top[COUNTER];
+ original_top[POINTER] = ss->top[POINTER];
+
+ do
+ {
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ se->next = original_top[POINTER];
+
+ LFDS700_PAL_BARRIER_PROCESSOR_STORE;
+ LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( &ss->top, original_top, new_top, LFDS700_MISC_CAS_STRENGTH_WEAK, result, backoff_iteration, ps );
+ }
+ while( result != 1 );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds700_stack_internal.h"
+
+/***** private prototypes *****/
+static void lfds700_stack_internal_stack_validate( struct lfds700_stack_state *ss, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_stack_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds700_stack_query( struct lfds700_stack_state *ss, enum lfds700_stack_query query_type, void *query_input, void *query_output )
+{
+ struct lfds700_stack_element
+ *se;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ LFDS700_PAL_ASSERT( ss != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS700_STACK_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS700_PAL_ASSERT( query_input == NULL );
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ *(lfds700_pal_uint_t *) query_output = 0;
+
+ se = (struct lfds700_stack_element *) ss->top[POINTER];
+
+ while( se != NULL )
+ {
+ ( *(lfds700_pal_uint_t *) query_output )++;
+ se = (struct lfds700_stack_element *) se->next;
+ }
+ break;
+
+ case LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS700_PAL_ASSERT( query_output != NULL );
+
+ lfds700_stack_internal_stack_validate( ss, (struct lfds700_misc_validation_info *) query_input, (enum lfds700_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds700_stack_internal_stack_validate( struct lfds700_stack_state *ss, struct lfds700_misc_validation_info *vi, enum lfds700_misc_validity *lfds700_stack_validity )
+{
+ lfds700_pal_uint_t
+ number_elements = 0;
+
+ struct lfds700_stack_element
+ *se_fast,
+ *se_slow;
+
+ LFDS700_PAL_ASSERT( ss != NULL );
+ // TRD : vi can be NULL
+ LFDS700_PAL_ASSERT( lfds700_stack_validity != NULL );
+
+ *lfds700_stack_validity = LFDS700_MISC_VALIDITY_VALID;
+
+ se_slow = se_fast = (struct lfds700_stack_element *) ss->top[POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the stack
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the stack)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( se_slow != NULL )
+ do
+ {
+ se_slow = se_slow->next;
+
+ if( se_fast != NULL )
+ se_fast = se_fast->next;
+
+ if( se_fast != NULL )
+ se_fast = se_fast->next;
+ }
+ while( se_slow != NULL and se_fast != se_slow );
+
+ if( se_fast != NULL and se_slow != NULL and se_fast == se_slow )
+ *lfds700_stack_validity = LFDS700_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds700_stack_validity == LFDS700_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds700_stack_query( ss, LFDS700_STACK_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds700_stack_validity = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds700_stack_validity = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** public prototypes *****/
+#include "../inc/liblfds700.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define NO_FLAGS 0x0
+
+#define LFDS700_ABSTRACTION_BACKOFF_LIMIT (0x1 << 10)
+
+#if( defined _KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined _KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+// TRD : lfds700_pal_atom_t volatile *destination, lfds700_pal_atom_t *compare, lfds700_pal_atom_t new_destination, enum lfds700_misc_cas_strength cas_strength, char unsigned result, lfds700_pal_uint_t *backoff_iteration
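+// a note on the backoff scheme used below : on the first failed CAS there is no delay
+// (backoff_iteration moves from its initial value to 1); on each subsequent failure the
+// thread busy-waits for a random number of timeslots bounded by backoff_iteration, which
+// then doubles, wrapping back to the initial value at LFDS700_ABSTRACTION_BACKOFF_LIMIT
+// - i.e. randomized, bounded exponential backoff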
+#define LFDS700_PAL_ATOMIC_CAS_WITH_BACKOFF( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result, backoff_iteration, ps ) \
+{ \
+ LFDS700_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ); \
+ \
+ if( result == 0 ) \
+ { \
+ lfds700_pal_uint_t \
+ endloop; \
+ \
+ lfds700_pal_uint_t volatile \
+ loop; \
+ \
+ if( (backoff_iteration) == LFDS700_ABSTRACTION_BACKOFF_LIMIT ) \
+ (backoff_iteration) = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE; \
+ \
+ if( (backoff_iteration) == LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE ) \
+ (backoff_iteration) = 1; \
+ else \
+ { \
+ endloop = ( LFDS700_MISC_PRNG_GENERATE(ps) % (backoff_iteration) ) * ps->local_copy_of_global_exponential_backoff_timeslot_length_in_loop_iterations_for_cas; \
+ for( loop = 0 ; loop < endloop ; loop++ ); \
+ } \
+ \
+ (backoff_iteration) <<= 1; \
+ } \
+}
+
+// TRD : lfds700_pal_atom_t volatile (*destination)[2], lfds700_pal_atom_t (*compare)[2], lfds700_pal_atom_t (*new_destination)[2], enum lfds700_misc_cas_strength cas_strength, char unsigned result, lfds700_pal_uint_t *backoff_iteration
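+// the DWCAS variant below follows the same randomized exponential backoff scheme as the
+// CAS variant above, differing only in the atomic operation and the timeslot length used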
+#define LFDS700_PAL_ATOMIC_DWCAS_WITH_BACKOFF( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result, backoff_iteration, ps ) \
+{ \
+ LFDS700_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ); \
+ \
+ if( result == 0 ) \
+ { \
+ lfds700_pal_uint_t \
+ endloop; \
+ \
+ lfds700_pal_uint_t volatile \
+ loop; \
+ \
+ if( (backoff_iteration) == LFDS700_ABSTRACTION_BACKOFF_LIMIT ) \
+ (backoff_iteration) = LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE; \
+ \
+ if( (backoff_iteration) == LFDS700_MISC_ABSTRACTION_BACKOFF_INITIAL_VALUE ) \
+ (backoff_iteration) = 1; \
+ else \
+ { \
+ endloop = ( LFDS700_MISC_PRNG_GENERATE(ps) % (backoff_iteration) ) * ps->local_copy_of_global_exponential_backoff_timeslot_length_in_loop_iterations_for_dwcas; \
+ for( loop = 0 ; loop < endloop ; loop++ ); \
+ } \
+ \
+ (backoff_iteration) <<= 1; \
+ } \
+}
+
+/***** library-wide prototypes *****/
+
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+OBJDIR := ../../obj
+SRCDIR := ../../src
+LIBINCDIRS := ../../../liblfds700/inc/
+LIBBINDIRS := ../../../liblfds700/bin/
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+
+##### sources, objects and libraries #####
+BINNAME := test
+BINARY := $(BINDIR)/$(BINNAME)
+SRCDIRS := .
+SOURCES := main.c misc.c \
+ test_lfds700_btree_addonly_unbalanced.c test_lfds700_btree_addonly_unbalanced_alignment.c test_lfds700_btree_addonly_unbalanced_random_adds_fail.c test_lfds700_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c test_lfds700_btree_addonly_unbalanced_random_adds_overwrite.c \
+ test_lfds700_freelist.c test_lfds700_freelist_alignment.c test_lfds700_freelist_popping.c test_lfds700_freelist_popping_and_pushing.c test_lfds700_freelist_pushing.c test_lfds700_freelist_rapid_popping_and_pushing.c \
+ test_lfds700_hash_addonly.c test_lfds700_hash_addonly_alignment.c test_lfds700_hash_addonly_iterate.c test_lfds700_hash_addonly_random_adds_fail.c test_lfds700_hash_addonly_random_adds_fail_and_overwrite.c test_lfds700_hash_addonly_random_adds_overwrite.c \
+ test_lfds700_list_addonly_ordered_singlylinked.c test_lfds700_list_addonly_ordered_singlylinked_alignment.c test_lfds700_list_addonly_ordered_singlylinked_new_ordered.c test_lfds700_list_addonly_ordered_singlylinked_new_ordered_with_cursor.c \
+ test_lfds700_list_addonly_singlylinked_unordered.c test_lfds700_list_addonly_singlylinked_unordered_alignment.c test_lfds700_list_addonly_singlylinked_unordered_new_after.c test_lfds700_list_addonly_singlylinked_unordered_new_end.c test_lfds700_list_addonly_singlylinked_unordered_new_start.c \
+ test_lfds700_porting_abstraction_layer_atomic.c test_lfds700_porting_abstraction_layer_atomic_cas.c test_lfds700_porting_abstraction_layer_atomic_dcas.c test_lfds700_porting_abstraction_layer_atomic_exchange.c \
+ test_lfds700_queue.c test_lfds700_queue_alignment.c test_lfds700_queue_dequeuing.c test_lfds700_queue_enqueuing.c test_lfds700_queue_enqueuing_and_dequeuing.c test_lfds700_queue_enqueuing_and_dequeuing_with_free.c test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free.c test_lfds700_queue_rapid_enqueuing_and_dequeuing.c \
+ test_lfds700_queue_bounded_singleconsumer_singleproducer.c test_lfds700_queue_bounded_singleconsumer_singleproducer_dequeuing.c test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing.c test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing_and_dequeuing.c \
+ test_lfds700_ringbuffer.c test_lfds700_ringbuffer_reading.c test_lfds700_ringbuffer_reading_and_writing.c test_lfds700_ringbuffer_writing.c \
+ test_lfds700_stack.c test_lfds700_stack_alignment.c test_lfds700_stack_popping.c test_lfds700_stack_popping_and_pushing.c test_lfds700_stack_pushing.c test_lfds700_stack_rapid_popping_and_pushing.c \
+ test_porting_abstraction_layer_get_logical_core_ids.c test_porting_abstraction_layer_thread_start.c test_porting_abstraction_layer_thread_wait.c \
+ util_cmdline.c util_memory_helpers.c util_thread_starter.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS := -lm -lpthread -lrt
+USRLIBS := -llfds700
+
+##### default paths #####
+CPATH += $(LIBINCDIRS)
+LIBRARY_PATH += $(LIBBINDIRS)
+export CPATH
+export LIBRARY_PATH
+
+##### tools #####
+MAKE := make
+MFLAGS :=
+
+DG := gcc
+DGFLAGS := -MM -std=gnu89
+
+CC := gcc
+CFBASE := -c -pthread -std=gnu89 -Wall -Wno-unknown-pragmas
+CFCOV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFDBG := -O0 -ggdb -D_DEBUG
+CFPROF := -O0 -ggdb -DPROF -pg
+CFREL := -O2 -DNDEBUG -finline-functions -Wno-strict-aliasing
+CFTSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIE
+
+LD := gcc
+LFBASE := -pthread -std=gnu89 -Wall -Werror
+LFCOV := -O0 -fprofile-arcs -ftest-coverage
+LFDBG := -O0 -ggdb
+LFPROF := -O0 -pg
+LFREL := -O2 -s -finline-functions
+LFTSAN := -O0 -fsanitize=thread -pie
+
+PROF := gprof
+PFBASE := -b -p -Q $(BINARY) gmon.out
+
+##### variants and libnuma check #####
+CFLAGS += $(CFBASE)
+LFLAGS += $(LFBASE)
+
+ifeq ($(MAKECMDGOALS),)
+ CFLAGS += $(CFDBG)
+ LFLAGS += $(LFDBG)
+endif
+
+ifeq ($(MAKECMDGOALS),cov)
+ CFLAGS += $(CFCOV)
+ LFLAGS += $(LFCOV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(MAKECMDGOALS),dbg)
+ CFLAGS += $(CFDBG)
+ LFLAGS += $(LFDBG)
+endif
+
+ifeq ($(MAKECMDGOALS),prof)
+ CFLAGS += $(CFPROF)
+ LFLAGS += $(LFPROF)
+endif
+
+ifeq ($(MAKECMDGOALS),rel)
+ CFLAGS += $(CFREL)
+ LFLAGS += $(LFREL)
+endif
+
+ifeq ($(MAKECMDGOALS),tsan)
+ CFLAGS += $(CFTSAN)
+ LFLAGS += $(LFTSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS) -o $@ $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) -o $(BINARY) $(LFLAGS) $(OBJECTS) $(USRLIBS) $(SYSLIBS)
+ chmod +x $(BINARY)
+
+##### phony #####
+.PHONY : clean cov dbg prof rel tsan
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d $(OBJDIR)/*.gcno
+
+cov : $(BINARY)
+dbg : $(BINARY)
+prof : $(BINARY)
+rel : $(BINARY)
+tsan : $(BINARY)
+
+genprof :
+ @$(PROF) $(PFBASE)
+
+##### dependencies #####
+-include $(OBJECTS:.o=.d)
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : necessary trailing space after the close bracket
+
+##### sources, objects and libraries #####
+BINNAME := test
+BINARY := $(BINDIR)\$(BINNAME).exe
+SRCDIRS := .
+SOURCES := main.c misc.c \
+ test_lfds700_btree_addonly_unbalanced.c test_lfds700_btree_addonly_unbalanced_alignment.c test_lfds700_btree_addonly_unbalanced_random_adds_fail.c test_lfds700_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c test_lfds700_btree_addonly_unbalanced_random_adds_overwrite.c \
+ test_lfds700_freelist.c test_lfds700_freelist_alignment.c test_lfds700_freelist_popping.c test_lfds700_freelist_popping_and_pushing.c test_lfds700_freelist_pushing.c test_lfds700_freelist_rapid_popping_and_pushing.c \
+ test_lfds700_hash_addonly.c test_lfds700_hash_addonly_alignment.c test_lfds700_hash_addonly_iterate.c test_lfds700_hash_addonly_random_adds_fail.c test_lfds700_hash_addonly_random_adds_fail_and_overwrite.c test_lfds700_hash_addonly_random_adds_overwrite.c \
+ test_lfds700_list_addonly_ordered_singlylinked.c test_lfds700_list_addonly_ordered_singlylinked_alignment.c test_lfds700_list_addonly_ordered_singlylinked_new_ordered.c test_lfds700_list_addonly_ordered_singlylinked_new_ordered_with_cursor.c \
+ test_lfds700_list_addonly_singlylinked_unordered.c test_lfds700_list_addonly_singlylinked_unordered_alignment.c test_lfds700_list_addonly_singlylinked_unordered_new_after.c test_lfds700_list_addonly_singlylinked_unordered_new_end.c test_lfds700_list_addonly_singlylinked_unordered_new_start.c \
+ test_lfds700_porting_abstraction_layer_atomic.c test_lfds700_porting_abstraction_layer_atomic_cas.c test_lfds700_porting_abstraction_layer_atomic_dcas.c test_lfds700_porting_abstraction_layer_atomic_exchange.c \
+ test_lfds700_queue.c test_lfds700_queue_alignment.c test_lfds700_queue_dequeuing.c test_lfds700_queue_enqueuing.c test_lfds700_queue_enqueuing_and_dequeuing.c test_lfds700_queue_enqueuing_and_dequeuing_with_free.c test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free.c test_lfds700_queue_rapid_enqueuing_and_dequeuing.c \
+ test_lfds700_queue_bounded_singleconsumer_singleproducer.c test_lfds700_queue_bounded_singleconsumer_singleproducer_dequeuing.c test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing.c test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing_and_dequeuing.c \
+ test_lfds700_ringbuffer.c test_lfds700_ringbuffer_reading.c test_lfds700_ringbuffer_reading_and_writing.c test_lfds700_ringbuffer_writing.c \
+ test_lfds700_stack.c test_lfds700_stack_alignment.c test_lfds700_stack_popping.c test_lfds700_stack_popping_and_pushing.c test_lfds700_stack_pushing.c test_lfds700_stack_rapid_popping_and_pushing.c \
+ test_porting_abstraction_layer_get_logical_core_ids.c test_porting_abstraction_layer_thread_start.c test_porting_abstraction_layer_thread_wait.c \
+ util_cmdline.c util_memory_helpers.c util_thread_starter.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+RESFILE := $(patsubst %.rc,$(OBJDIR)/%.res,$(notdir $(RCFILE)))
+SYSLIBS := kernel32.lib
+EXTLIBS :=
+USRLIBS := ../../../liblfds700/bin/liblfds700.lib
+
+##### default paths fix up #####
+INCLUDE += ;../../../../liblfds700/inc/
+LIB += ;../../../../liblfds700/bin/
+
+##### tools #####
+MAKE := make
+MFLAGS :=
+
+CC := cl
+CFBASE := /c /D_CRT_SECURE_NO_WARNINGS /DWIN32_LEAN_AND_MEAN /DUNICODE /D_UNICODE "/Fd$(BINDIR)\$(BINNAME).pdb" /nologo /W4 /WX
+CFREL := /DNDEBUG /Ox
+CFDBG := /D_DEBUG /Od /Gm /Zi
+
+LD := link
+LFBASE := /nologo /subsystem:console /nodefaultlib /nxcompat /wx
+LFREL := /incremental:no
+LFDBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+
+##### variants #####
+CFLAGS := $(CFBASE) $(CFDBG) /MTd
+LFLAGS := $(LFBASE) $(LFDBG)
+CLIB := libcmtd.lib
+
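+# the defaults above give a debug build against the static CRT (i.e. libdbg); the librel,
+# libdbg, dllrel and dlldbg goals below override CFLAGS, LFLAGS and the CRT library
+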
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS := $(CFBASE) $(CFREL) /MT
+ LFLAGS := $(LFBASE) $(LFREL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS := $(CFBASE) $(CFDBG) /MTd
+ LFLAGS := $(LFBASE) $(LFDBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS := $(CFBASE) $(CFREL) /MD
+ LFLAGS := $(LFBASE) $(LFREL)
+ CLIB := msvcrt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS := $(CFBASE) $(CFDBG) /MDd
+ LFLAGS := $(LFBASE) $(LFDBG)
+ CLIB := msvcrtd.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS) "/Fo$@" $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS) $(USRLIBS)
+ $(LD) $(LFLAGS) $(CLIB) $(SYSLIBS) $(EXTLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(OBJDIR)\*.obj $(OBJDIR)\*.res $(BINDIR)\$(BINNAME).* $(QUIETLY)
+
+dlldbg : $(BINARY)
+dllrel : $(BINARY)
+
+libdbg : $(BINARY)
+librel : $(BINARY)
+
+##### notes #####
+# TRD : we fix up the default paths because cl and link require an extra argument per additional path, which is ugly as hell
+
--- /dev/null
+\r
+Microsoft Visual Studio Solution File, Format Version 12.00\r
+# Visual Studio 2012\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "test", "test.vcxproj", "{A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}"\r
+ ProjectSection(ProjectDependencies) = postProject\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0} = {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}\r
+ EndProjectSection\r
+EndProject\r
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "liblfds700", "..\..\..\liblfds700\build\visual_studio_professional_2012\liblfds700.vcxproj", "{1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}"\r
+EndProject\r
+Global\r
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution\r
+ Debug DLL|Win32 = Debug DLL|Win32\r
+ Debug DLL|x64 = Debug DLL|x64\r
+ Debug LIB|Win32 = Debug LIB|Win32\r
+ Debug LIB|x64 = Debug LIB|x64\r
+ Release DLL|Win32 = Release DLL|Win32\r
+ Release DLL|x64 = Release DLL|x64\r
+ Release LIB|Win32 = Release LIB|Win32\r
+ Release LIB|x64 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug DLL|Win32.Deploy.0 = Debug DLL|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug DLL|x64.Deploy.0 = Debug DLL|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug LIB|Win32.ActiveCfg = Debug LIB|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug LIB|Win32.Build.0 = Debug LIB|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug LIB|Win32.Deploy.0 = Debug LIB|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug LIB|x64.ActiveCfg = Debug LIB|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug LIB|x64.Build.0 = Debug LIB|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Debug LIB|x64.Deploy.0 = Debug LIB|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release DLL|Win32.Deploy.0 = Release DLL|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release DLL|x64.Deploy.0 = Release DLL|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release LIB|Win32.ActiveCfg = Release LIB|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release LIB|Win32.Build.0 = Release LIB|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release LIB|Win32.Deploy.0 = Release LIB|Win32\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release LIB|x64.ActiveCfg = Release LIB|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release LIB|x64.Build.0 = Release LIB|x64\r
+ {A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}.Release LIB|x64.Deploy.0 = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.ActiveCfg = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|Win32.Build.0 = Debug DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.ActiveCfg = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug DLL|x64.Build.0 = Debug DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.ActiveCfg = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|Win32.Build.0 = Debug LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.ActiveCfg = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Debug LIB|x64.Build.0 = Debug LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.ActiveCfg = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|Win32.Build.0 = Release DLL|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.ActiveCfg = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release DLL|x64.Build.0 = Release DLL|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.ActiveCfg = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|Win32.Build.0 = Release LIB|Win32\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.ActiveCfg = Release LIB|x64\r
+ {1E5D7D09-94F2-455D-AE5E-6C7F4C96BCE0}.Release LIB|x64.Build.0 = Release LIB|x64\r
+ EndGlobalSection\r
+ GlobalSection(SolutionProperties) = preSolution\r
+ HideSolutionNode = FALSE\r
+ EndGlobalSection\r
+EndGlobal\r
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup Label="ProjectConfigurations">\r
+ <ProjectConfiguration Include="Debug DLL|Win32">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug DLL|x64">\r
+ <Configuration>Debug DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|Win32">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Debug LIB|x64">\r
+ <Configuration>Debug LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|Win32">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release DLL|x64">\r
+ <Configuration>Release DLL</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|Win32">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>Win32</Platform>\r
+ </ProjectConfiguration>\r
+ <ProjectConfiguration Include="Release LIB|x64">\r
+ <Configuration>Release LIB</Configuration>\r
+ <Platform>x64</Platform>\r
+ </ProjectConfiguration>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\src\internal.h" />\r
+ <ClInclude Include="..\..\src\test_porting_abstraction_layer_operating_system.h" />\r
+ <ClInclude Include="..\..\src\util_cmdline.h" />\r
+ <ClInclude Include="..\..\src\util_memory_helpers.h" />\r
+ <ClInclude Include="..\..\src\util_thread_starter.h" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\main.c" />\r
+ <ClCompile Include="..\..\src\misc.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_random_adds_fail.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_random_adds_overwrite.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_popping.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_popping_and_pushing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_pushing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_rapid_popping_and_pushing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_iterate.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_random_adds_fail.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_random_adds_fail_and_overwrite.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_random_adds_overwrite.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked_new_ordered.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked_new_ordered_with_cursor.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_new_after.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_new_end.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_new_start.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic_cas.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic_dcas.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic_exchange.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer_dequeuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing_and_dequeuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_dequeuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing_and_dequeuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing_and_dequeuing_with_free.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_rapid_enqueuing_and_dequeuing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer_reading.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer_reading_and_writing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer_writing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_stack.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_alignment.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_popping.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_popping_and_pushing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_pushing.c" />\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_rapid_popping_and_pushing.c" />\r
+ <ClCompile Include="..\..\src\test_porting_abstraction_layer_get_logical_core_ids.c" />\r
+ <ClCompile Include="..\..\src\test_porting_abstraction_layer_thread_start.c" />\r
+ <ClCompile Include="..\..\src\test_porting_abstraction_layer_thread_wait.c" />\r
+ <ClCompile Include="..\..\src\util_cmdline.c" />\r
+ <ClCompile Include="..\..\src\util_memory_helpers.c" />\r
+ <ClCompile Include="..\..\src\util_thread_starter.c" />\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ProjectReference Include="..\..\..\liblfds700\build\visual_studio_professional_2012\liblfds700.vcxproj">\r
+ <Project>{1e5d7d09-94f2-455d-ae5e-6c7f4c96bce0}</Project>\r
+ </ProjectReference>\r
+ </ItemGroup>\r
+ <PropertyGroup Label="Globals">\r
+ <ProjectGuid>{A6BA3A68-A1D4-4C07-A0D5-7EAE73272A43}</ProjectGuid>\r
+ <Keyword>Win32Proj</Keyword>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>true</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="Configuration">\r
+ <ConfigurationType>Application</ConfigurationType>\r
+ <UseDebugLibraries>false</UseDebugLibraries>\r
+ <PlatformToolset>v110</PlatformToolset>\r
+ </PropertyGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />\r
+ <ImportGroup Label="ExtensionSettings">\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Label="PropertySheets" Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <ImportGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'" Label="PropertySheets">\r
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />\r
+ </ImportGroup>\r
+ <PropertyGroup Label="UserMacros" />\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <LinkIncremental>false</LinkIncremental>\r
+ <OutDir>$(ProjectDir)..\..\bin\$(Platform) $(Configuration)\</OutDir>\r
+ <IntDir>$(ProjectDir)..\..\obj\$(Platform) $(Configuration)\</IntDir>\r
+ </PropertyGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <FunctionLevelLinking>false</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>false</EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>NoExtensions</EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <AdditionalIncludeDirectories>..\liblfds700\inc;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <BufferSecurityCheck>true</BufferSecurityCheck>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <LinkTimeCodeGeneration>\r
+ </LinkTimeCodeGeneration>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <FunctionLevelLinking>false</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>false</EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>NoExtensions</EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <AdditionalIncludeDirectories>..\liblfds700\inc;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>\r
+ <BufferSecurityCheck>true</BufferSecurityCheck>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <LinkTimeCodeGeneration>\r
+ </LinkTimeCodeGeneration>\r
+ <LinkStatus>\r
+ </LinkStatus>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <FunctionLevelLinking>false</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>false</EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <BufferSecurityCheck>true</BufferSecurityCheck>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <LinkTimeCodeGeneration>\r
+ </LinkTimeCodeGeneration>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>ProgramDatabase</DebugInformationFormat>\r
+ <Optimization>Disabled</Optimization>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>true</SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <StringPooling>true</StringPooling>\r
+ <MinimalRebuild>false</MinimalRebuild>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <FunctionLevelLinking>false</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>false</EnableParallelCodeGeneration>\r
+ <EnableEnhancedInstructionSet>NotSet</EnableEnhancedInstructionSet>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <BrowseInformation>true</BrowseInformation>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ <BufferSecurityCheck>true</BufferSecurityCheck>\r
+ <OmitFramePointers>false</OmitFramePointers>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>true</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <LinkTimeCodeGeneration>\r
+ </LinkTimeCodeGeneration>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ </Link>\r
+ <ProjectReference>\r
+ <LinkLibraryDependencies>true</LinkLibraryDependencies>\r
+ </ProjectReference>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>\r
+ </SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <StringPooling>true</StringPooling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>true</EnableParallelCodeGeneration>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <SetChecksum>true</SetChecksum>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <LinkTimeCodeGeneration>UseLinkTimeCodeGeneration</LinkTimeCodeGeneration>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|Win32'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>\r
+ </SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <StringPooling>true</StringPooling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>true</EnableParallelCodeGeneration>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ </ClCompile>\r
+ <Link>\r
+ <TargetMachine>MachineX86</TargetMachine>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <SetChecksum>true</SetChecksum>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <LinkTimeCodeGeneration>UseLinkTimeCodeGeneration</LinkTimeCodeGeneration>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release DLL|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>\r
+ </SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <StringPooling>true</StringPooling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>true</EnableParallelCodeGeneration>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <SetChecksum>true</SetChecksum>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <LinkTimeCodeGeneration>UseLinkTimeCodeGeneration</LinkTimeCodeGeneration>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release LIB|x64'">\r
+ <ClCompile>\r
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>\r
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>\r
+ <WarningLevel>Level4</WarningLevel>\r
+ <DebugInformationFormat>None</DebugInformationFormat>\r
+ <CompileAsManaged>false</CompileAsManaged>\r
+ <CompileAsWinRT>false</CompileAsWinRT>\r
+ <ExceptionHandling>false</ExceptionHandling>\r
+ <CreateHotpatchableImage>false</CreateHotpatchableImage>\r
+ <CompileAs>CompileAsC</CompileAs>\r
+ <TreatWarningAsError>true</TreatWarningAsError>\r
+ <SDLCheck>\r
+ </SDLCheck>\r
+ <MultiProcessorCompilation>true</MultiProcessorCompilation>\r
+ <InlineFunctionExpansion>AnySuitable</InlineFunctionExpansion>\r
+ <IntrinsicFunctions>true</IntrinsicFunctions>\r
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>\r
+ <OmitFramePointers>true</OmitFramePointers>\r
+ <EnableFiberSafeOptimizations>true</EnableFiberSafeOptimizations>\r
+ <WholeProgramOptimization>true</WholeProgramOptimization>\r
+ <StringPooling>true</StringPooling>\r
+ <BufferSecurityCheck>false</BufferSecurityCheck>\r
+ <FunctionLevelLinking>true</FunctionLevelLinking>\r
+ <EnableParallelCodeGeneration>true</EnableParallelCodeGeneration>\r
+ <FloatingPointExceptions>false</FloatingPointExceptions>\r
+ <RuntimeTypeInfo>false</RuntimeTypeInfo>\r
+ <OpenMPSupport>false</OpenMPSupport>\r
+ <ErrorReporting>None</ErrorReporting>\r
+ </ClCompile>\r
+ <Link>\r
+ <GenerateDebugInformation>false</GenerateDebugInformation>\r
+ <SubSystem>Console</SubSystem>\r
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>\r
+ <OptimizeReferences>true</OptimizeReferences>\r
+ <TreatLinkerWarningAsErrors>true</TreatLinkerWarningAsErrors>\r
+ <SetChecksum>true</SetChecksum>\r
+ <LinkErrorReporting>NoErrorReport</LinkErrorReporting>\r
+ <AdditionalDependencies>kernel32.lib;%(AdditionalDependencies)</AdditionalDependencies>\r
+ <LinkTimeCodeGeneration>UseLinkTimeCodeGeneration</LinkTimeCodeGeneration>\r
+ </Link>\r
+ </ItemDefinitionGroup>\r
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />\r
+ <ImportGroup Label="ExtensionTargets">\r
+ </ImportGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <ItemGroup>\r
+ <Filter Include="Source Files">\r
+ <UniqueIdentifier>{4FC737F1-C7A5-4376-A066-2A32D752A2FF}</UniqueIdentifier>\r
+ <Extensions>cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx</Extensions>\r
+ </Filter>\r
+ <Filter Include="Header Files">\r
+ <UniqueIdentifier>{93995380-89BD-4b04-88EB-625FBE52EBFB}</UniqueIdentifier>\r
+ <Extensions>h;hpp;hxx;hm;inl;inc;xsd</Extensions>\r
+ </Filter>\r
+ <Filter Include="Resource Files">\r
+ <UniqueIdentifier>{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}</UniqueIdentifier>\r
+ <Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav</Extensions>\r
+ </Filter>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClInclude Include="..\..\src\internal.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\test_porting_abstraction_layer_operating_system.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\util_cmdline.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\util_memory_helpers.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ <ClInclude Include="..\..\src\util_thread_starter.h">\r
+ <Filter>Header Files</Filter>\r
+ </ClInclude>\r
+ </ItemGroup>\r
+ <ItemGroup>\r
+ <ClCompile Include="..\..\src\main.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\misc.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_random_adds_fail.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_btree_addonly_unbalanced_random_adds_overwrite.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_popping.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_popping_and_pushing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_pushing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_freelist_rapid_popping_and_pushing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_iterate.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_random_adds_fail.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_random_adds_fail_and_overwrite.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_hash_addonly_random_adds_overwrite.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked_new_ordered.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_ordered_singlylinked_new_ordered_with_cursor.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_new_after.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_new_end.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_list_addonly_singlylinked_unordered_new_start.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic_cas.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic_dcas.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_porting_abstraction_layer_atomic_exchange.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer_dequeuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_bounded_singleconsumer_singleproducer_enqueuing_and_dequeuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_dequeuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing_and_dequeuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing_and_dequeuing_with_free.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_queue_rapid_enqueuing_and_dequeuing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer_reading.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer_reading_and_writing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_ringbuffer_writing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_stack.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_alignment.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_popping.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_popping_and_pushing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_pushing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_lfds700_stack_rapid_popping_and_pushing.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_porting_abstraction_layer_get_logical_core_ids.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_porting_abstraction_layer_thread_start.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\test_porting_abstraction_layer_thread_wait.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\util_cmdline.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\util_memory_helpers.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ <ClCompile Include="..\..\src\util_thread_starter.c">\r
+ <Filter>Source Files</Filter>\r
+ </ClCompile>\r
+ </ItemGroup>\r
+</Project>
\ No newline at end of file
--- /dev/null
+<?xml version="1.0" encoding="utf-8"?>\r
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">\r
+ <PropertyGroup />\r
+</Project>
\ No newline at end of file
--- /dev/null
+/***** includes *****/
+#define _GNU_SOURCE
+#include <assert.h>
+#include <ctype.h>
+#include <limits.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include "../../liblfds700/inc/liblfds700.h"
+#include "test_porting_abstraction_layer_operating_system.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
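+// "and" and "or" are the <iso646.h> alternative operator spellings (&& and ||), defined
+// here directly; the test code below uses the word forms in its conditionals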
+
+#define NO_FLAGS 0x0
+
+#define BITS_PER_BYTE 8
+
+#define TEST_DURATION_IN_SECONDS 5
+#define TIME_LOOP_COUNT 10000
+#define REDUCED_TIME_LOOP_COUNT 1000
+#define NUMBER_OF_NANOSECONDS_IN_ONE_SECOND 1000000000LLU
+#define ONE_MEGABYTE_IN_BYTES (1024 * 1024)
+#define DEFAULT_TEST_MEMORY_IN_MEGABYTES 512U
+#define TEST_PAL_DEFAULT_NUMA_NODE_ID 0
+#define LFDS700_TEST_VERSION_STRING "7.0.0"
+#define LFDS700_TEST_VERSION_INTEGER 700
+
+#if( defined _KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined _KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
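+
+// if none of the four combinations above applies (for example NDEBUG together with
+// COVERAGE), BUILD_TYPE_STRING is left undefined and the build only fails later at its
+// point of use; a guard such as
+//   #if( !defined BUILD_TYPE_STRING )
+//     #error please select exactly one build type
+//   #endif
+// placed here would surface the problem earlier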
+
+/***** enums *****/
+enum flag
+{
+ LOWERED,
+ RAISED
+};
+
+/***** structs *****/
+struct test_pal_logical_processor
+{
+ lfds700_pal_uint_t
+ logical_processor_number,
+ windows_logical_processor_group_number;
+
+ struct lfds700_list_asu_element
+ lasue;
+};
+
+/***** prototypes *****/
+int main( int argc, char **argv );
+
+void internal_display_test_name( char *format_string, ... );
+void internal_display_test_result( lfds700_pal_uint_t number_name_dvs_pairs, ... );
+void internal_display_data_structure_validity( enum lfds700_misc_validity dvs );
+void internal_show_version( void );
+void internal_logical_core_id_element_cleanup_callback( struct lfds700_list_asu_state *lasus, struct lfds700_list_asu_element *lasue );
+
+int test_pal_thread_start( test_pal_thread_state_t *thread_state, struct test_pal_logical_processor *lp, test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state), void *thread_user_state );
+void test_pal_thread_wait( test_pal_thread_state_t thread_state );
+void test_pal_get_logical_core_ids( struct lfds700_list_asu_state *lasus );
+
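+// in the prototype groups below, each un-indented function is the driver for one data
+// structure and the indented prototypes beneath it are the individual tests that driver calls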
+void test_lfds700_pal_atomic( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_pal_atomic_cas( struct lfds700_list_asu_state *list_of_logical_processors );
+ void test_lfds700_pal_atomic_dwcas( struct lfds700_list_asu_state *list_of_logical_processors );
+ void test_lfds700_pal_atomic_exchange( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+
+void test_lfds700_hash_a( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_hash_a_alignment( void );
+ void test_lfds700_hash_a_fail_and_overwrite_on_existing_key( void );
+ void test_lfds700_hash_a_random_adds_fail_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_hash_a_random_adds_overwrite_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_hash_a_iterate( void );
+
+void test_lfds700_list_aos( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_list_aos_alignment( void );
+ void test_lfds700_list_aos_new_ordered( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_list_aos_new_ordered_with_cursor( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+
+void test_lfds700_list_asu( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_list_asu_alignment( void );
+ void test_lfds700_list_asu_new_start( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_list_asu_new_end( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_list_asu_new_after( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+
+void test_lfds700_btree_au( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_btree_au_alignment( void );
+ void test_lfds700_btree_au_fail_and_overwrite_on_existing_key( void );
+ void test_lfds700_btree_au_random_adds_fail_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_btree_au_random_adds_overwrite_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+
+void test_lfds700_freelist( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_freelist_alignment( void );
+ void test_lfds700_freelist_popping( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_freelist_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_freelist_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_freelist_rapid_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors );
+ void test_lfds700_freelist_pushing_array( void );
+
+void test_lfds700_queue( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_queue_alignment( void );
+ void test_lfds700_queue_enqueuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_queue_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_queue_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors );
+ void test_lfds700_queue_rapid_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_queue_enqueuing_and_dequeuing_with_free( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free( struct lfds700_list_asu_state *list_of_logical_processors );
+
+void test_lfds700_queue_bss( struct lfds700_list_asu_state *list_of_logical_processors );
+ void test_lfds700_queue_bss_enqueuing( void );
+ void test_lfds700_queue_bss_dequeuing( void );
+ void test_lfds700_queue_bss_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors );
+
+void test_lfds700_ringbuffer( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_ringbuffer_reading( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_ringbuffer_reading_and_writing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_ringbuffer_writing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+
+void test_lfds700_stack( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_stack_alignment( void );
+ void test_lfds700_stack_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_stack_popping( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_stack_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes );
+ void test_lfds700_stack_rapid_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors );
+ void test_lfds700_stack_pushing_array( void );
+
+/***** late includes *****/
+#include "util_cmdline.h"
+#include "util_memory_helpers.h"
+#include "util_thread_starter.h"
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+int main( int argc, char **argv )
+{
+ enum flag
+ run_flag = LOWERED,
+ show_error_flag = LOWERED,
+ show_help_flag = LOWERED,
+ show_version_flag = LOWERED;
+
+ int
+ rv;
+
+ lfds700_pal_uint_t
+ loop,
+ iterations = 1,
+ memory_in_megabytes = DEFAULT_TEST_MEMORY_IN_MEGABYTES;
+
+ struct lfds700_list_asu_state
+ list_of_logical_processors;
+
+ struct util_cmdline_state
+ cs;
+
+ union util_cmdline_arg_data
+ *arg_data;
+
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ lfds700_misc_library_init_valid_on_current_logical_core();
+
+ util_cmdline_init( &cs );
+
+ util_cmdline_add_arg( &cs, 'h', LIBCOMMON_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'i', LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 'm', LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 'r', LIBCOMMON_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'v', LIBCOMMON_CMDLINE_ARG_TYPE_FLAG );
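+  // a typical resulting command line is e.g. "test -r -i 2 -m 256" : run the full test
+  // suite twice, with 256 megabytes of memory for test elements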
+
+ rv = util_cmdline_process_args( &cs, argc, argv );
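+  // util_cmdline_process_args() returns 1 when the arguments parse cleanly and 0 otherwise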
+
+ if( rv == 0 )
+ show_error_flag = RAISED;
+
+ if( rv == 1 )
+ {
+ util_cmdline_get_arg_data( &cs, 'h', &arg_data );
+ if( arg_data != NULL )
+ show_help_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'i', &arg_data );
+ if( arg_data != NULL )
+ iterations = (lfds700_pal_uint_t) arg_data->integer.integer;
+
+ util_cmdline_get_arg_data( &cs, 'm', &arg_data );
+ if( arg_data != NULL )
+ memory_in_megabytes = (lfds700_pal_uint_t) arg_data->integer.integer;
+
+ util_cmdline_get_arg_data( &cs, 'r', &arg_data );
+ if( arg_data != NULL )
+ run_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'v', &arg_data );
+ if( arg_data != NULL )
+ show_version_flag = RAISED;
+ }
+
+ util_cmdline_cleanup( &cs );
+
+ if( argc == 1 or (run_flag == LOWERED and show_version_flag == LOWERED) )
+ show_help_flag = RAISED;
+
+ if( show_error_flag == RAISED )
+ {
+ printf( "\nInvalid arguments. Sorry - it's a simple parser, so no clues.\n"
+ "-h or run with no args to see the help text.\n" );
+
+ return( EXIT_SUCCESS );
+ }
+
+ if( show_help_flag == RAISED )
+ {
+ printf( "test -h -i [n] -m [n] -r -v\n"
+ " -h : help\n"
+ " -i [n] : number of iterations (default : 1)\n"
+ " -m [n] : memory for tests, in mb (default : %u)\n"
+ " -r : run (causes test to run; present so no args gives help)\n"
+ " -v : version\n", DEFAULT_TEST_MEMORY_IN_MEGABYTES );
+
+ return( EXIT_SUCCESS );
+ }
+
+ if( show_version_flag == RAISED )
+ {
+ internal_show_version();
+ return( EXIT_SUCCESS );
+ }
+
+ if( run_flag == RAISED )
+ {
+ test_pal_get_logical_core_ids( &list_of_logical_processors );
+
+ for( loop = 0 ; loop < (lfds700_pal_uint_t) iterations ; loop++ )
+ {
+ printf( "\n"
+ "Test Iteration %02llu\n"
+ "=================\n", (int long long unsigned) (loop+1) );
+
+ test_lfds700_pal_atomic( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_btree_au( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_freelist( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_hash_a( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_list_aos( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_list_asu( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_queue( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_queue_bss( &list_of_logical_processors );
+ test_lfds700_ringbuffer( &list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_stack( &list_of_logical_processors, memory_in_megabytes );
+ }
+
+ lfds700_list_asu_cleanup( &list_of_logical_processors, internal_logical_core_id_element_cleanup_callback );
+ }
+
+ lfds700_misc_library_cleanup();
+
+ return( EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void internal_display_test_name( char *format_string, ... )
+{
+ va_list
+ va;
+
+ assert( format_string != NULL );
+
+ va_start( va, format_string );
+
+ vprintf( format_string, va );
+
+ printf( "..." );
+
+ va_end( va );
+
+ fflush( stdout );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_display_test_result( lfds700_pal_uint_t number_name_dvs_pairs, ... )
+{
+ char
+ *name;
+
+ enum flag
+ passed_flag = RAISED;
+
+ enum lfds700_misc_validity
+ dvs;
+
+ lfds700_pal_uint_t
+ loop;
+
+ va_list
+ va;
+
+ // TRD : number_name_dvs_pairs can be any value in its range
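+  /* an illustrative call, where dvs_one and dvs_two are whatever validity results the
+     caller holds : internal_display_test_result( 2, "btree_au", dvs_one, "freelist", dvs_two );
+     i.e. a count followed by that many (char *name, enum lfds700_misc_validity dvs) pairs
+  */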
+
+ va_start( va, number_name_dvs_pairs );
+
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )
+ {
+ name = va_arg( va, char * );
+ dvs = va_arg( va, enum lfds700_misc_validity );
+
+ if( dvs != LFDS700_MISC_VALIDITY_VALID )
+ {
+ passed_flag = LOWERED;
+ break;
+ }
+ }
+
+ va_end( va );
+
+ if( passed_flag == RAISED )
+ puts( "passed" );
+
+ if( passed_flag == LOWERED )
+ {
+ printf( "failed (" );
+
+ va_start( va, number_name_dvs_pairs );
+
+ for( loop = 0 ; loop < number_name_dvs_pairs ; loop++ )
+ {
+ name = va_arg( va, char * );
+ dvs = va_arg( va, enum lfds700_misc_validity );
+
+ printf( "%s ", name );
+ internal_display_data_structure_validity( dvs );
+
+ if( loop+1 < number_name_dvs_pairs )
+ printf( ", " );
+ }
+
+ va_end( va );
+
+ printf( ")\n" );
+
+ /* TRD : quick hack
+ the whole test programme needs rewriting
+ and for now I just want to make it so we
+ exit with failure upon any test failing
+ */
+
+ exit( EXIT_FAILURE );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_display_data_structure_validity( enum lfds700_misc_validity dvs )
+{
+ char
+ *string = NULL;
+
+ switch( dvs )
+ {
+ case LFDS700_MISC_VALIDITY_VALID:
+ string = "valid";
+ break;
+
+ case LFDS700_MISC_VALIDITY_INVALID_LOOP:
+ string = "invalid - loop detected";
+ break;
+
+ case LFDS700_MISC_VALIDITY_INVALID_ORDER:
+ string = "invalid - invalid order detected";
+ break;
+
+ case LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS:
+ string = "invalid - missing elements";
+ break;
+
+ case LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS:
+ string = "invalid - additional elements";
+ break;
+
+ case LFDS700_MISC_VALIDITY_INVALID_TEST_DATA:
+ string = "invalid - invalid test data";
+ break;
+ }
+
+ printf( "%s", string );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void internal_show_version()
+{
+ char const
+ *version_and_build_string;
+
+ printf( "test %s (%s, %s) (" __DATE__ " " __TIME__ ")\n", LFDS700_TEST_VERSION_STRING, BUILD_TYPE_STRING, MODE_TYPE_STRING );
+
+ lfds700_misc_query( LFDS700_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
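+// C4100 is MSVC's "unreferenced formal parameter" warning; lasus is used only in an
+// assert(), so in NDEBUG builds it would otherwise be reported as unreferenced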
+
+void internal_logical_core_id_element_cleanup_callback( struct lfds700_list_asu_state *lasus, struct lfds700_list_asu_element *lasue )
+{
+ struct test_pal_logical_processor
+ *lp;
+
+ assert( lasus != NULL );
+ assert( lasue != NULL );
+
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ util_aligned_free( lp );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_btree_au( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_CAS and LFDS700_MISC_ATOMIC_SUPPORT_EXCHANGE )
+ {
+ printf( "\n"
+ "Binary Tree (add-only, unbalanced) Tests\n"
+ "========================================\n" );
+
+ test_lfds700_btree_au_alignment();
+ test_lfds700_btree_au_fail_and_overwrite_on_existing_key();
+ test_lfds700_btree_au_random_adds_fail_on_existing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_btree_au_random_adds_overwrite_on_existing( list_of_logical_processors, memory_in_megabytes );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_btree_au_alignment()
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
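+
+ /* with a C11 compiler the same conditions could also be checked at compile time, for
+    example : _Static_assert( offsetof(struct lfds700_btree_au_element,up) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES == 0, "baue alignment" );
+    but, as described above, this test deliberately reports at run time
+ */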
+
+ internal_display_test_name( "Alignment" );
+
+
+
+ // TRD : struct lfds700_btree_au_element
+ if( offsetof(struct lfds700_btree_au_element,up) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_btree_au_element,left) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_btree_au_element,right) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_btree_au_element,key) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_btree_au_element,value) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds700_btree_au_state
+ if( offsetof(struct lfds700_btree_au_state,root) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_btree_au_state,key_compare_function) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "btree_au", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_btree_au_element
+ baue;
+
+ lfds700_pal_uint_t
+ key;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ insert_fail_count,
+ number_elements;
+
+ struct lfds700_btree_au_state
+ *baus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_btree_au_random_adds_fail_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ actual_sum_insert_failure_count,
+ expected_sum_insert_failure_count,
+ index = 0,
+ *key_count_array,
+ loop,
+ number_elements,
+ number_logical_processors,
+ random_value,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_btree_au_element
+ *baue = NULL;
+
+ struct lfds700_btree_au_state
+ baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ void
+ *key;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single btree_au
+ we generate an array of elements for each thread (one thread per logical processor)
+ we set a random number in each element, which is the key
+ the random numbers range from 0 to half the number of elements, so we must have some duplicates
+ (we don't use value, so we always pass in a NULL for that when we insert)
+
+ each thread loops, adds those elements into the btree, and counts the total number of insert fails
+ (we don't count on a per value basis because of the performance hit - we'll be TLBing all the time)
+ this test has the btree_au set to fail on add, so duplicates should be eliminated
+
+ we then merge the per-thread arrays
+
+ we should find in the tree one of every value, and the sum of the counts of each value (beyond the
+ first value, which was inserted) in the merged arrays should equal the sum of the insert fails from
+ each thread
+
+ we check the count of unique values in the merged array and use that when calling the btree_au validation function
+
+ we in-order walk and check that what we have in the tree matches what we have in the merged array
+ and then check the fail counts
+ */
+
+ internal_display_test_name( "Random adds and walking (fail on existing key)" );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->baus = &baus;
+ (ts+loop)->element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ (ts+loop)->number_elements = number_elements;
+ (ts+loop)->insert_fail_count = 0;
+
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ random_value = LFDS700_MISC_PRNG_GENERATE( &ps );
+ ((ts+loop)->element_array+subloop)->key = (lfds700_pal_uint_t) floor( (number_elements/2) * ((double) random_value / (double) LFDS700_MISC_PRNG_MAX) );
+ }
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_adding, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ /* TRD : now for validation
+ make an array equal to number_elements, set all to 0
+ iterate over every per-thread array, counting the number of each value into this array
+ so we can know how many elements ought to have failed to be inserted
+ as well as being able to work out the actual number of elements which should be present in the btree, for the btree validation call
+ */
+
+ key_count_array = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_elements );
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ *(key_count_array+loop) = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ ( *(key_count_array+( (ts+loop)->element_array+subloop)->key) )++;
+
+ // TRD : first, btree validation function
+ vi.min_elements = number_elements;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) == 0 )
+ vi.min_elements--;
+
+ vi.max_elements = vi.min_elements;
+
+ lfds700_btree_au_query( &baus, LFDS700_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ /* TRD : now check the sum of per-thread insert failures
+ is what it should be, which is the sum of key_count_array,
+ but with every count minus one (for the single successful insert)
+ and where elements of 0 are ignored (i.e. do not have -1 applied)
+ */
+
+ expected_sum_insert_failure_count = 0;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ expected_sum_insert_failure_count += *(key_count_array+loop) - 1;
+
+ actual_sum_insert_failure_count = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ actual_sum_insert_failure_count += (ts+loop)->insert_fail_count;
+
+ if( expected_sum_insert_failure_count != actual_sum_insert_failure_count )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ /* TRD : now compare the combined array against an in-order walk of the tree
+ ignoring array elements with the value 0, we should find an exact match
+ */
+
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ {
+ // TRD : in-order walk over btree_au and check key_count_array matches
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position(&baus, &baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ key = LFDS700_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ while( *(key_count_array+index) == 0 )
+ index++;
+
+ if( index++ != (lfds700_pal_uint_t) key )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+ }
+
+ // TRD : cleanup
+ free( key_count_array );
+
+ lfds700_btree_au_cleanup( &baus, NULL );
+
+ // TRD : cleanup
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ util_aligned_free( (ts+loop)->element_array );
+
+ free( ts );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "btree_au", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
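+  // the keys are plain integers stored directly in the pointer-sized key slot of each
+  // element, so they are compared numerically after casting back to lfds700_pal_uint_t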
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state )
+{
+ enum lfds700_btree_au_insert_result
+ alr;
+
+ lfds700_pal_uint_t
+ index = 0;
+
+ struct test_state
+ *ts;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
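+  // insert every element of this thread's pre-generated array, counting how many inserts
+  // fail because the key is already present in the tree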
+ while( index < ts->number_elements )
+ {
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( (ts->element_array+index)->baue, (ts->element_array+index)->key );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( (ts->element_array+index)->baue, 0 );
+ alr = lfds700_btree_au_insert( ts->baus, &(ts->element_array+index)->baue, NULL, &ps );
+
+ if( alr == LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY )
+ ts->insert_fail_count++;
+
+ index++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_btree_au_fail_and_overwrite_on_existing_key()
+{
+ enum lfds700_btree_au_insert_result
+ alr;
+
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ struct lfds700_btree_au_element
+ baue_one,
+ baue_two,
+ *existing_baue;
+
+ struct lfds700_btree_au_state
+ baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ void
+ *value;
+
+ /* TRD : the random_adds tests with fail and overwrite don't (can't, not in a performant manner)
+ test that the fail and/or overwrite of user data has *actually* happened - they use the
+ return value from the link function call, rather than empirically observing the final
+ state of the tree
+
+ as such, we now have a couple of single threaded tests where we check that the user data
+ value really is being modified (or not modified, as the case may be)
+ */
+
+ internal_display_test_name( "Fail and overwrite on existing key" );
+
+ lfds700_misc_prng_init( &ps );
+
+ /* TRD : so, we make a tree which is fail on existing
+ add one element, with a known user data
+ we then try to add the same key again, with a different user data
+ the call should fail, and then we get the element by its key
+ and check its user data is unchanged
+ (and confirm the failed link returned the correct existing_baue)
+ that's the first test done
+ */
+
+ lfds700_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( baue_one, 0 );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_one, 1 );
+ alr = lfds700_btree_au_insert( &baus, &baue_one, NULL, &ps );
+
+ if( alr != LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( baue_two, 0 );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_two, 2 );
+ alr = lfds700_btree_au_insert( &baus, &baue_two, &existing_baue, &ps );
+
+ if( alr != LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( existing_baue != &baue_one )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ value = LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( *existing_baue );
+
+ if( (void *) (lfds700_pal_uint_t) value != (void *) (lfds700_pal_uint_t) 1 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds700_btree_au_cleanup( &baus, NULL );
+
+ /* TRD : second test, make a tree which is overwrite on existing
+ add one element, with a known user data
+ we then try to add the same key again, with a different user data
+ the call should succeed, and then we get the element by its key
+ and check its user data is changed
+ (and confirm the failed link returned the correct existing_baue)
+ that's the second test done
+ */
+
+ lfds700_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE, NULL );
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( baue_one, 0 );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_one, 1 );
+ alr = lfds700_btree_au_insert( &baus, &baue_one, NULL, &ps );
+
+ if( alr != LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( baue_two, 0 );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_two, 2 );
+ alr = lfds700_btree_au_insert( &baus, &baue_two, NULL, &ps );
+
+ if( alr != LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds700_btree_au_cleanup( &baus, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "btree_au", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_btree_au_element
+ baue;
+
+ lfds700_pal_uint_t
+ key;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ insert_existing_count,
+ number_elements;
+
+ struct lfds700_btree_au_state
+ *baus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_btree_au_random_adds_overwrite_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ actual_sum_insert_existing_count,
+ expected_sum_insert_existing_count,
+ index = 0,
+ *key_count_array,
+ loop,
+ number_elements,
+ number_logical_processors,
+ random_value,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_btree_au_element
+ *baue = NULL;
+
+ struct lfds700_btree_au_state
+ baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ void
+ *key;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single btree_au
+ we generate an array of elements for each thread (one thread per logical processor)
+ we set a random number in each element, which is the key
+ the random numbers range from 0 to half the number of elements, so we must have some duplicates
+ (we don't use value, so we always pass in a NULL for that when we insert)
+
+ each thread loops, adds those elements into the btree, and counts the total number of insert fails
+ (we don't count on a per value basis because of the performance hit - we'll be TLBing all the time)
+ this test has the btree_au set to overwrite on add, so duplicates should be eliminated
+
+ we then merge the per-thread arrays
+
+ we should find in the tree one of every value, and the sum of the counts of each value (beyond the
+ first value, which was inserted) in the merged arrays should equal the sum of the existing_baues returned
+ from each thread when they inserted and found an existing element
+
+ we check the count of unique values in the merged array and use that when calling the btree_au validation function
+
+ we in-order walk and check that what we have in the tree matches what we have in the merged array
+ and then check the fail counts
+ */
+
+ internal_display_test_name( "Random adds and walking (overwrite on existing key)" );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_OVERWRITE, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->baus = &baus;
+ (ts+loop)->element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ (ts+loop)->number_elements = number_elements;
+ (ts+loop)->insert_existing_count = 0;
+
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ random_value = LFDS700_MISC_PRNG_GENERATE( &ps );
+ ((ts+loop)->element_array+subloop)->key = (lfds700_pal_uint_t) floor( (number_elements/2) * ((double) random_value / (double) LFDS700_MISC_PRNG_MAX) );
+ }
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_adding, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ /* TRD : now for validation
+ make an array equal to number_elements, set all to 0
+ iterate over every per-thread array, counting the number of each value into this array
+ so we can know how many elements ought to have failed to be inserted
+ as well as being able to work out the actual number of elements which should be present in the btree, for the btree validation call
+ */
+
+ key_count_array = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_elements );
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ *(key_count_array+loop) = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ ( *(key_count_array+( (ts+loop)->element_array+subloop)->key) )++;
+
+ // TRD : first, btree validation function
+ vi.min_elements = number_elements;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) == 0 )
+ vi.min_elements--;
+
+ vi.max_elements = vi.min_elements;
+
+ lfds700_btree_au_query( &baus, LFDS700_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ /* TRD : now check the sum of per-thread insert failures
+ is what it should be, which is the sum of key_count_array,
+ but with every count minus one (for the single successful insert)
+ and where elements of 0 are ignored (i.e. do not have -1 applied)
+ */
+
+ expected_sum_insert_existing_count = 0;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ expected_sum_insert_existing_count += *(key_count_array+loop) - 1;
+
+ actual_sum_insert_existing_count = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ actual_sum_insert_existing_count += (ts+loop)->insert_existing_count;
+
+ if( expected_sum_insert_existing_count != actual_sum_insert_existing_count )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ /* TRD : now compare the combined array against an in-order walk of the tree
+ ignoring array elements with the value 0, we should find an exact match
+ */
+
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ {
+ // TRD : in-order walk over btree_au and check key_count_array matches
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_btree_au_get_by_absolute_position_and_then_by_relative_position(&baus, &baue, LFDS700_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS700_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ key = LFDS700_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ while( *(key_count_array+index) == 0 )
+ index++;
+
+ if( index++ != (lfds700_pal_uint_t) key )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+ }
+
+ // TRD : cleanup
+ free( key_count_array );
+
+ lfds700_btree_au_cleanup( &baus, NULL );
+
+ // TRD : cleanup
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ util_aligned_free( (ts+loop)->element_array );
+
+ free( ts );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "btree_au", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state )
+{
+ enum lfds700_btree_au_insert_result
+ alr;
+
+ lfds700_pal_uint_t
+ index = 0;
+
+ struct test_state
+ *ts;
+
+ struct lfds700_btree_au_element
+ *existing_baue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( index < ts->number_elements )
+ {
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( (ts->element_array+index)->baue, (ts->element_array+index)->key );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( (ts->element_array+index)->baue, 0 );
+ alr = lfds700_btree_au_insert( ts->baus, &(ts->element_array+index)->baue, &existing_baue, &ps );
+
+ if( alr == LFDS700_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE )
+ ts->insert_existing_count++;
+
+ index++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_freelist( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ printf( "\n"
+ "Freelist Tests\n"
+ "==============\n" );
+
+ test_lfds700_freelist_alignment();
+ test_lfds700_freelist_popping( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_freelist_pushing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_freelist_popping_and_pushing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_freelist_rapid_popping_and_pushing( list_of_logical_processors );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_freelist_alignment()
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ internal_display_test_name( "Alignment" );
+
+
+
+ // TRD : struct lfds700_freelist_state
+ if( offsetof(struct lfds700_freelist_state,top) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_freelist_state,user_state) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ struct lfds700_freelist_state
+ *fs;
+};
+
+struct test_element
+{
+ struct lfds700_freelist_element
+ fe;
+
+ enum flag
+ popped_flag;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_freelist_popping( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_freelist_state
+ fs;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi = { 0, 0 };
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a freelist with as many elements as will fit in the memory given to us
+
+ the creation function runs in a single thread and creates
+ and pushes those elements onto the freelist
+
+ each element contains a void pointer to the container test element
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each test element has a flag which indicates it has been popped
+
+ the threads run till the source freelist is empty
+
+ we then check the test elements
+ every element should have been popped
+
+ then tidy up
+
+ we have no extra code for CAS/GC as we're only popping
+ */
+
+ internal_display_test_name( "Popping" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);
+
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ (te_array+loop)->popped_flag = LOWERED;
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds700_freelist_push( &fs, &(te_array+loop)->fe, &ps );
+ }
+
+ ts = util_aligned_malloc( sizeof(struct test_state) * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ (ts+loop)->fs = &fs;
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
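+ /* the store barrier plus forced store which follow appear intended to flush the
+ freelist and test state initialisation to memory before the worker threads -
+ which may begin on other logical cores - are started; the matching load side
+ is the LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+ macro at the top of each thread function
+ */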
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_popping, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
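+ // vi was initialised to { 0, 0 } because the popping threads drain the freelist completely, so validation expects zero elements to remain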
+ lfds700_freelist_query( &fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ // TRD : now we check each element has popped_flag set to RAISED
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( (te_array+loop)->popped_flag == LOWERED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds700_freelist_cleanup( &fs, NULL );
+ util_aligned_free( ts );
+ util_aligned_free( te_array );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping( void *util_thread_starter_thread_state )
+{
+ struct lfds700_freelist_element
+ *fe;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( lfds700_freelist_pop(ts->fs, &fe, &ps) )
+ {
+ te = LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+ te->popped_flag = RAISED;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element;
+
+struct test_state
+{
+ struct lfds700_freelist_state
+ fs_thread_local,
+ *fs;
+
+ lfds700_pal_uint_t
+ number_elements;
+
+ struct test_element
+ *fs_thread_local_te_array;
+};
+
+struct test_element
+{
+ struct lfds700_freelist_element
+ fe,
+ thread_local_fe;
+
+ lfds700_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *util_thread_starter_thread_state );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_freelist_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_freelist_state
+ fs;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pops number_elements elements then pushes them back
+ the second thread pushes number_elements elements then pops that many back
+ all pushes and pops go onto the single main freelist
+ with a per-thread local freelist to store the pops
+
+ after time is up, all threads push what they have remaining onto
+ the main freelist
+
+ we then validate the main freelist
+ */
+
+ internal_display_test_name( "Popping and pushing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors * 2 );
+
+ lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+
+ // TRD : we allocate half the total elements here, and half again later, which is why *2 above, but not here
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ // TRD : initial elements in the main freelist so the popping threads can start immediately
+ for( loop = 0 ; loop < number_elements * number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->datum = loop;
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds700_freelist_push( &fs, &(te_array+loop)->fe, &ps );
+ }
+
+ ts = util_aligned_malloc( sizeof(struct test_state) * number_logical_processors * 2, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ // TRD : first set of threads (poppers)
+ (ts+loop)->fs = &fs;
+ (ts+loop)->number_elements = number_elements;
+ lfds700_freelist_init_valid_on_current_logical_core( &(ts+loop)->fs_thread_local, NULL );
+
+ // TRD : second set of threads (pushers - who need elements in their per-thread freelists)
+ (ts+loop+number_logical_processors)->fs = &fs;
+ (ts+loop+number_logical_processors)->number_elements = number_elements;
+ lfds700_freelist_init_valid_on_current_logical_core( &(ts+loop+number_logical_processors)->fs_thread_local, NULL );
+
+ // TRD : fill the pushing thread freelists
+ (ts+loop+number_logical_processors)->fs_thread_local_te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ ((ts+loop+number_logical_processors)->fs_thread_local_te_array+subloop)->datum = loop;
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( ((ts+loop+number_logical_processors)->fs_thread_local_te_array+subloop)->thread_local_fe, (ts+loop+number_logical_processors)->fs_thread_local_te_array+subloop );
+ lfds700_freelist_push( &(ts+loop+number_logical_processors)->fs_thread_local, &((ts+loop+number_logical_processors)->fs_thread_local_te_array+subloop)->thread_local_fe, &ps );
+ }
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors * 2 );
+
+ util_thread_starter_new( &tts, number_logical_processors * 2 );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_popping_and_pushing_start_popping, ts+loop );
+ util_thread_starter_start( tts, &thread_handles[loop+number_logical_processors], loop+number_logical_processors, lp, thread_popping_and_pushing_start_pushing, ts+loop+number_logical_processors );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors * 2 ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors * 2;
+
+ lfds700_freelist_query( &fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ lfds700_freelist_cleanup( &fs, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ lfds700_freelist_cleanup( &(ts+loop)->fs_thread_local, NULL );
+ lfds700_freelist_cleanup( &(ts+loop+number_logical_processors)->fs_thread_local, NULL );
+ util_aligned_free( (ts+loop+number_logical_processors)->fs_thread_local_te_array );
+ }
+
+ util_aligned_free( ts );
+
+ util_aligned_free( te_array );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ count;
+
+ struct lfds700_freelist_element
+ *fe;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ struct test_state
+ *ts;
+
+ time_t
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ count = 0;
+
+ while( count < ts->number_elements )
+ if( lfds700_freelist_pop(ts->fs, &fe, &ps) )
+ {
+ // TRD : we do nothing with the test data, so there's no GET or SET here
+ lfds700_freelist_push( &ts->fs_thread_local, fe, &ps );
+ count++;
+ }
+
+ // TRD : return our local freelist to the main freelist
+ while( lfds700_freelist_pop(&ts->fs_thread_local, &fe, &ps) )
+ lfds700_freelist_push( ts->fs, fe, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ count;
+
+ struct lfds700_freelist_element
+ *fe;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ // TRD : return our local freelist to the main freelist
+ while( lfds700_freelist_pop(&ts->fs_thread_local, &fe, &ps) )
+ lfds700_freelist_push( ts->fs, fe, &ps );
+
+ count = 0;
+
+ while( count < ts->number_elements )
+ if( lfds700_freelist_pop(ts->fs, &fe, &ps) )
+ {
+ lfds700_freelist_push( &ts->fs_thread_local, fe, &ps );
+ count++;
+ }
+ }
+
+ // TRD : now push whatever we have in our local freelist
+ while( lfds700_freelist_pop(&ts->fs_thread_local, &fe, &ps) )
+ lfds700_freelist_push( ts->fs, fe, &ps );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements,
+ thread_number;
+
+ struct lfds700_freelist_state
+ *fs;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds700_freelist_element
+ fe;
+
+ lfds700_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_pushing( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_freelist_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_freelist_element
+ *fe;
+
+ struct lfds700_freelist_state
+ fs;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te,
+ *first_te = NULL;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create an empty freelist
+
+ we then create one thread per CPU, where each thread
+ pushes number_elements elements each as quickly as possible to the freelist
+
+ the data pushed is a counter and a thread ID
+
+ the threads exit once they have pushed all their elements
+
+ we then validate the freelist,
+
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals number_elements per thread
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
+
+ internal_display_test_name( "Pushing" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ // TRD : the main freelist
+ lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->fs = &fs;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_pushing, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements - 1;
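+ /* each thread pushed datum values 0 .. number_elements-1 in ascending order and the
+ freelist is LIFO, so popping should see each thread's values in descending order;
+ the per-thread counters therefore start at number_elements-1 and are decremented
+ as matching values are seen
+ */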
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors;
+
+ lfds700_freelist_query( &fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
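+ // note : "and" is the ISO C alternative spelling of && from <iso646.h>, presumably pulled in via internal.h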
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_freelist_pop(&fs, &fe, &ps) )
+ {
+ te = LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+
+ if( first_te == NULL )
+ first_te = te;
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( te->datum > per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->datum < per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->datum == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]--;
+ }
+
+ // TRD : clean up
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ util_aligned_free( (ts+loop)->te_array );
+
+ free( per_thread_counters );
+
+ free( ts );
+
+ lfds700_freelist_cleanup( &fs, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_pushing( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ ts->te_array = util_aligned_malloc( sizeof(struct test_element) * ts->number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
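+ // this per-thread element array is deliberately not freed here - the main test function frees it (via (ts+loop)->te_array) once the pushed elements have been validated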
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ (ts->te_array+loop)->thread_number = ts->thread_number;
+ (ts->te_array+loop)->datum = loop;
+ }
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( (ts->te_array+loop)->fe, ts->te_array+loop );
+ lfds700_freelist_push( ts->fs, &(ts->te_array+loop)->fe, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ struct lfds700_freelist_state
+ *fs;
+};
+
+struct test_element
+{
+ struct lfds700_freelist_element
+ fe;
+
+ lfds700_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_freelist_rapid_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_logical_processors;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_freelist_state
+ fs;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi = { 0, 0 };
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory clean up that we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/clean up that
+ they can tell the freelist is valid and don't leak memory
+ and here, this test now is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but goes very quickly
+
+ what we do is create a small freelist and then run one thread
+ per CPU, where each thread simply pushes and then immediately
+ pops
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the freelist, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ internal_display_test_name( "Rapid popping and pushing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ (ts+loop)->fs = &fs;
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ // TRD : we need one element per thread
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds700_freelist_push( &fs, &(te_array+loop)->fe, &ps );
+ }
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_rapid_popping_and_pushing, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_logical_processors;
+
+ lfds700_freelist_query( &fs, LFDS700_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ lfds700_freelist_cleanup( &fs, NULL );
+
+ util_aligned_free( te_array );
+
+ free( ts );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "freelist", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_freelist_element
+ *fe;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds700_freelist_pop( ts->fs, &fe, &ps );
+ lfds700_freelist_push( ts->fs, fe, &ps );
+
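+ // time() is comparatively expensive, so it is only checked once every TIME_LOOP_COUNT iterations of the pop/push pair (TIME_LOOP_COUNT presumably being defined in internal.h)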
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_hash_a( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_CAS )
+ {
+ printf( "\n"
+ "Hash (add-only) Tests\n"
+ "=====================\n" );
+
+ test_lfds700_hash_a_alignment();
+ test_lfds700_hash_a_fail_and_overwrite_on_existing_key();
+ test_lfds700_hash_a_random_adds_fail_on_existing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_hash_a_random_adds_overwrite_on_existing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_hash_a_iterate();
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_hash_a_alignment()
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ internal_display_test_name( "Alignment" );
+
+
+
+ // TRD : struct lfds700_hash_a_element
+ if( offsetof(struct lfds700_hash_a_element,value) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "hash_a", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_btree_au_element
+ baue;
+
+ lfds700_pal_uint_t
+ datum;
+};
+
+struct test_state
+{
+ enum lfds700_misc_flag
+ error_flag;
+
+ struct lfds700_hash_a_state
+ *has;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_hash_a_iterate( void )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ *counter_array,
+ loop;
+
+ struct lfds700_hash_a_element
+ *hae;
+
+ struct lfds700_hash_a_iterate
+ hai;
+
+ struct lfds700_hash_a_state
+ has;
+
+ struct lfds700_hash_a_element
+ *element_array;
+
+ struct lfds700_btree_au_state
+ *baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ void
+ *value;
+
+ /* TRD : single-threaded test
+ we create a single hash_a
+ we populate with 1000 elements
+ where key and value is the number of the element (e.g. 0 to 999)
+ we then allocate 1000 counters, init to 0
+ we then iterate
+ we increment each element as we see it in the iterate
+ if any are missing or seen more than once, problemo!
+
+ we do this once with a table of 10, to ensure each btree in the table has (or almost certainly has) something in it
+ and then a second time with a table of 10000, to ensure some empty btrees exist
+ */
+
+ internal_display_test_name( "Iterate" );
+
+ lfds700_misc_prng_init( &ps );
+
+ element_array = util_aligned_malloc( sizeof(struct lfds700_hash_a_element) * 1000, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ counter_array = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * 1000 );
+
+ // TRD : first time around
+ baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 10, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_FAIL, NULL );
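+ // the ten btree states allocated above appear to form the hash's table - each element is hashed to one of the btrees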
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( *(element_array+loop), loop );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( *(element_array+loop), loop );
+ lfds700_hash_a_insert( &has, element_array+loop, NULL, &ps );
+ }
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ *(counter_array+loop) = 0;
+
+ lfds700_hash_a_iterate_init( &has, &hai );
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_hash_a_iterate(&hai, &hae) )
+ {
+ value = LFDS700_HASH_A_GET_VALUE_FROM_ELEMENT( *hae );
+ ( *(counter_array + (lfds700_pal_uint_t) value) )++;
+ }
+
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ if( *(counter_array+loop) > 1 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( *(counter_array+loop) == 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ }
+
+ lfds700_hash_a_cleanup( &has, NULL );
+
+ util_aligned_free( baus );
+
+ // TRD : second time around
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ {
+ baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 10000, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10000, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( *(element_array+loop), loop );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( *(element_array+loop), loop );
+ lfds700_hash_a_insert( &has, element_array+loop, NULL, &ps );
+ }
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ *(counter_array+loop) = 0;
+
+ lfds700_hash_a_iterate_init( &has, &hai );
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_hash_a_iterate(&hai, &hae) )
+ {
+ value = LFDS700_HASH_A_GET_VALUE_FROM_ELEMENT( *hae );
+ ( *(counter_array + (lfds700_pal_uint_t) value ) )++;
+ }
+
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ if( *(counter_array+loop) > 1 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( *(counter_array+loop) == 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ }
+
+ lfds700_hash_a_cleanup( &has, NULL );
+
+ util_aligned_free( baus );
+ }
+
+ // TRD : cleanup
+ util_aligned_free( element_array );
+ free( counter_array );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "hash_a", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be NULL (i.e. 0)
+ // TRD : existing_key can be NULL (i.e. 0)
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) existing_key )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) existing_key )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ assert( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the addy of key
+ */
+
+ LFDS700_HASH_A_32BIT_HASH_FUNCTION( (void *) &key, sizeof(lfds700_pal_uint_t), *hash );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_hash_a_element
+ hae;
+
+ lfds700_pal_uint_t
+ datum,
+ key;
+};
+
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ lfds700_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds700_hash_a_state
+ *has;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state );
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_hash_a_random_adds_fail_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ offset,
+ temp,
+ value;
+
+ struct lfds700_hash_a_element
+ *hae;
+
+ struct lfds700_hash_a_state
+ has;
+
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ struct lfds700_btree_au_state
+ *baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *element_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single hash_a
+ we generate number_elements_per_thread elements per thread (with one thread per logical processor) in an array
+ each element is unique
+ we randomly sort the elements
+ then each thread loops, adds those elements into the hash_a
+ we check that each datum inserts okay - failure will occur on non-unique data, i.e. two identical keys
+ we should have no failures
+ we then call the hash_a validation function
+ then using the hash_a get() we check all the elements we added are present
+ */
+
+ internal_display_test_name( "Random adds and get (fail on existing key)" );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 1000, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 1000, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ number_elements_per_thread = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ // TRD : create an ordered list of unique numbers
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_total, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ (element_array+loop)->key = loop;
+ // TRD : + number_elements just to make it different to the key
+ (element_array+loop)->datum = loop + number_elements_total;
+ }
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ offset = LFDS700_MISC_PRNG_GENERATE( &ps );
+ offset %= number_elements_total;
+ temp = (element_array + offset)->key;
+ (element_array + offset)->key = (element_array + loop)->key;
+ (element_array + loop)->key = temp;
+ }
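+ /* the loop above randomises insertion order by swapping each key with one at a random
+ offset; this is not a strict Fisher-Yates shuffle, but it preserves the uniqueness of
+ the keys, which is all the test requires
+ */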
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->has = &has;
+ (ts+loop)->element_array = element_array + number_elements_per_thread*loop;
+ (ts+loop)->error_flag = LOWERED;
+ (ts+loop)->number_elements_per_thread = number_elements_per_thread;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_adding, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ // TRD : now for validation
+ vi.min_elements = vi.max_elements = number_elements_total;
+ lfds700_hash_a_query( &has, LFDS700_HASH_A_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ /* TRD : now we attempt to lfds700_hash_a_get_by_key() for every element in element_array
+ any failure to find is an error
+ we also check we've obtained the correct element
+ */
+
+ for( loop = 0 ; dvs == LFDS700_MISC_VALIDITY_VALID and loop < number_elements_total ; loop++ )
+ if( 0 == lfds700_hash_a_get_by_key(&has, (void *) (ts->element_array+loop)->key, &hae) )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ else
+ {
+ value = (lfds700_pal_uint_t) LFDS700_HASH_A_GET_VALUE_FROM_ELEMENT( *hae );
+ if( (ts->element_array+loop)->datum != value )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ // TRD : just check error_flags weren't raised
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (ts+loop)->error_flag == RAISED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds700_hash_a_cleanup( &has, NULL );
+
+ util_aligned_free( baus );
+
+ free( ts );
+
+ util_aligned_free( element_array );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "hash_a", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state )
+{
+ enum lfds700_hash_a_insert_result
+ apr;
+
+ lfds700_pal_uint_t
+ index = 0;
+
+ struct test_state
+ *ts;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( index < ts->number_elements_per_thread )
+ {
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( (ts->element_array+index)->hae, (ts->element_array+index)->key );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( (ts->element_array+index)->hae, (ts->element_array+index)->datum );
+ apr = lfds700_hash_a_insert( ts->has, &(ts->element_array+index)->hae, NULL, &ps );
+
+ if( apr == LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY )
+ ts->error_flag = RAISED;
+
+ index++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be NULL (i.e. 0)
+ // TRD : existing_key can be NULL (i.e. 0)
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) existing_key )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) existing_key )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ assert( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the addy of key
+ */
+
+ LFDS700_HASH_A_32BIT_HASH_FUNCTION( (void *) &key, sizeof(lfds700_pal_uint_t), *hash );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_hash_a_fail_and_overwrite_on_existing_key()
+{
+ enum lfds700_hash_a_insert_result
+ apr;
+
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ struct lfds700_hash_a_element
+ hae_one,
+ hae_two;
+
+ struct lfds700_hash_a_state
+ has;
+
+ struct lfds700_btree_au_state
+ *baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ internal_display_test_name( "Fail and overwrite on existing key" );
+
+ lfds700_misc_prng_init( &ps );
+
+ baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 10, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ // TRD : fail on overwrite
+ lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 0 );
+ apr = lfds700_hash_a_insert( &has, &hae_one, NULL, &ps );
+
+ if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 );
+ apr = lfds700_hash_a_insert( &has, &hae_two, NULL, &ps );
+
+ if( apr != LFDS700_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds700_hash_a_cleanup( &has, NULL );
+
+ // TRD : success on overwrite
+ lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_OVERWRITE, NULL );
+
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 1 );
+ apr = lfds700_hash_a_insert( &has, &hae_one, NULL, &ps );
+
+ if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 );
+ apr = lfds700_hash_a_insert( &has, &hae_two, NULL, &ps );
+
+ if( apr != LFDS700_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds700_hash_a_cleanup( &has, NULL );
+
+ util_aligned_free( baus );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "hash_a", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : key_new can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ assert( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the addy of key
+ */
+
+ LFDS700_HASH_A_32BIT_HASH_FUNCTION( (void *) &key, sizeof(lfds700_pal_uint_t), *hash );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_hash_a_element
+ hae;
+
+ lfds700_pal_uint_t
+ key;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements_per_thread,
+ overwrite_count;
+
+ struct lfds700_hash_a_state
+ *has;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state );
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash );
+static int qsort_and_bsearch_key_compare_function( void const *e1, void const *e2 );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_hash_a_random_adds_overwrite_on_existing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ int
+ rv;
+
+ lfds700_pal_uint_t
+ actual_sum_overwrite_existing_count,
+ expected_sum_overwrite_existing_count,
+ *key_count_array,
+ loop,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ random_value;
+
+ struct lfds700_hash_a_iterate
+ hai;
+
+ struct lfds700_hash_a_element
+ *hae;
+
+ struct lfds700_hash_a_state
+ has;
+
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ struct lfds700_btree_au_state
+ *baus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *element_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ void
+ *key_pointer,
+ *key;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single hash_a
+ we generate n elements per thread
+ each element contains a key value, which is set to a random value
+ (we don't use value, so it's just set to 0)
+ the threads then run, putting
+ the threads count their number of overwrite hits
+ once the threads are done, then we
+ count the number of each key
+ from this we figure out the min/max element for hash_a validation, so we call validation
+ we check the sum of overwrites for each thread is what it should be
+ then using the hash_a get() we check all the elements we expect are present
+ and then we iterate over the hash_a
+ checking we see each key once
+ */
+
+ internal_display_test_name( "Random adds, get and iterate (overwrite on existing key)" );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ baus = util_aligned_malloc( sizeof(struct lfds700_btree_au_state) * 1000, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_hash_a_init_valid_on_current_logical_core( &has, baus, 1000, key_compare_function, key_hash_function, LFDS700_HASH_A_EXISTING_KEY_OVERWRITE, NULL );
+
+ // TRD : we divide by 2 because we have to allocate a second array of this size later
+ number_elements_per_thread = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors ) / 2;
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ // TRD : create the array of test elements, assigning each a random key (duplicates are expected)
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_total, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ random_value = LFDS700_MISC_PRNG_GENERATE( &ps );
+ (element_array+loop)->key = (lfds700_pal_uint_t) floor( (number_elements_total/2) * ((double) random_value / (double) LFDS700_MISC_PRNG_MAX) );
+ }
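+ // keys are drawn from roughly half the range of element indexes, so duplicate keys - and therefore overwrites - are effectively guaranteed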
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->has = &has;
+ (ts+loop)->element_array = element_array + number_elements_per_thread*loop;
+ (ts+loop)->overwrite_count = 0;
+ (ts+loop)->number_elements_per_thread = number_elements_per_thread;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_adding, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ // TRD : now for validation
+ key_count_array = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_elements_total );
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ *(key_count_array+loop) = 0;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ ( *(key_count_array + (element_array+loop)->key) )++;
+
+ vi.min_elements = number_elements_total;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) == 0 )
+ vi.min_elements--;
+
+ vi.max_elements = vi.min_elements;
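+ /* with LFDS700_HASH_A_EXISTING_KEY_OVERWRITE each distinct key should appear in the hash
+ exactly once, so the expected element count is the number of distinct keys generated,
+ i.e. number_elements_total minus the count of key values which were never generated
+ */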
+
+ lfds700_hash_a_query( &has, LFDS700_HASH_A_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) &dvs );
+
+ expected_sum_overwrite_existing_count = 0;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ expected_sum_overwrite_existing_count += *(key_count_array+loop) - 1;
+
+ actual_sum_overwrite_existing_count = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ actual_sum_overwrite_existing_count += (ts+loop)->overwrite_count;
+
+ if( expected_sum_overwrite_existing_count != actual_sum_overwrite_existing_count )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : now loop over the expected array and check we can get() every element
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) > 0 )
+ {
+ rv = lfds700_hash_a_get_by_key( &has, (void *) loop, &hae );
+
+ if( rv != 1 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ /* TRD : now iterate, checking we find every element and no others
+ to do this in a timely manner, we need to qsort() the key values
+ and use bsearch() to check for items in the array
+ */
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ *(key_count_array+loop) = loop;
+ else
+ *(key_count_array+loop) = 0;
+
+ qsort( key_count_array, number_elements_total, sizeof(lfds700_pal_uint_t), qsort_and_bsearch_key_compare_function );
+
+ lfds700_hash_a_iterate_init( &has, &hai );
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_hash_a_iterate(&hai, &hae) )
+ {
+ key = LFDS700_HASH_A_GET_KEY_FROM_ELEMENT( *hae );
+
+ key_pointer = bsearch( &key, key_count_array, number_elements_total, sizeof(lfds700_pal_uint_t), qsort_and_bsearch_key_compare_function );
+
+ if( key_pointer == NULL )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ // TRD : cleanup
+ lfds700_hash_a_cleanup( &has, NULL );
+
+ util_aligned_free( baus );
+
+ free( ts );
+
+ util_aligned_free( element_array );
+
+ free( key_count_array );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "hash_a", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_adding( void *util_thread_starter_thread_state )
+{
+ enum lfds700_hash_a_insert_result
+ apr;
+
+ lfds700_pal_uint_t
+ index = 0;
+
+ struct test_state
+ *ts;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( index < ts->number_elements_per_thread )
+ {
+ LFDS700_HASH_A_SET_KEY_IN_ELEMENT( (ts->element_array+index)->hae, (ts->element_array+index)->key );
+ LFDS700_HASH_A_SET_VALUE_IN_ELEMENT( (ts->element_array+index)->hae, 0 );
+ apr = lfds700_hash_a_insert( ts->has, &(ts->element_array+index)->hae, NULL, &ps );
+
+ if( apr == LFDS700_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE )
+ ts->overwrite_count++;
+
+ index++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be NULL (i.e. 0)
+ // TRD : existing_key can be NULL (i.e. 0)
+
+ if( (lfds700_pal_uint_t) new_key < (lfds700_pal_uint_t) existing_key )
+ cr = -1;
+
+ if( (lfds700_pal_uint_t) new_key > (lfds700_pal_uint_t) existing_key )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void key_hash_function( void const *key, lfds700_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ assert( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the addy of key
+ */
+
+ LFDS700_HASH_A_32BIT_HASH_FUNCTION( (void *) &key, sizeof(lfds700_pal_uint_t), *hash );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static int qsort_and_bsearch_key_compare_function( void const *e1, void const *e2 )
+{
+ int
+ cr = 0;
+
+ lfds700_pal_uint_t
+ s1,
+ s2;
+
+ s1 = *(lfds700_pal_uint_t *) e1;
+ s2 = *(lfds700_pal_uint_t *) e2;
+
+ if( s1 > s2 )
+ cr = 1;
+
+ if( s1 < s2 )
+ cr = -1;
+
+ return( cr );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_list_aos( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_CAS )
+ {
+ printf( "\n"
+ "List (add-only, ordered, singly-linked) Tests\n"
+ "=============================================\n" );
+
+ test_lfds700_list_aos_alignment();
+ test_lfds700_list_aos_new_ordered( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_list_aos_new_ordered_with_cursor( list_of_logical_processors, memory_in_megabytes );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_list_aos_alignment()
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ internal_display_test_name( "Alignment" );
+
+
+
+ // TRD : struct lfds700_list_aos_element
+ if( offsetof(struct lfds700_list_aos_element,next) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_aos_element,value) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_aos_element,key) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds700_list_aos_state
+ if( offsetof(struct lfds700_list_aos_state,start) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_aos_state,dummy_element) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_aos_state,key_compare_function) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "list_aos", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_list_aos_element
+ laose;
+
+ lfds700_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds700_list_aos_state
+ *laoss;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int new_ordered_compare_function( void const *value_new, void const *value_in_list );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_ordered_thread( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_list_aos_new_ordered( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ expected_element_number,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ offset,
+ temp;
+
+ struct lfds700_list_aos_element
+ *laose = NULL;
+
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ struct lfds700_list_aos_state
+ laoss;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_state
+ *tts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : run one thread per logical processor
+ we have a single array of 10k elements per thread
+ this is set to be randomly ordered (but with contiguous numbers from 0 to n)
+ we give 10k to each thread (a pointer into the array at the correct point)
+ which then loops through that array
+ calling lfds700_list_aos_insert(), which performs an ordered insert
+ verification should show list is sorted
+ */
+
+ internal_display_test_name( "New ordered" );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_list_aos_init_valid_on_current_logical_core( &laoss, new_ordered_compare_function, LFDS700_LIST_AOS_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+
+ /* TRD : create randomly ordered number array with unique elements
+
+ unique isn't necessary - the list will sort anyway - but
+ it permits slightly better validation
+ */
+
+ number_elements_per_thread = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ // TRD : or the test takes a looooooong time...
+ if( number_elements_per_thread > 10000 )
+ number_elements_per_thread = 10000;
+
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_total, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ (element_array+loop)->element_number = loop;
+
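+ // TRD : shuffle - swap each element in turn with an element at a random offset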
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ offset = LFDS700_MISC_PRNG_GENERATE( &ps );
+ offset %= number_elements_total;
+ temp = (element_array + offset)->element_number;
+ (element_array + offset)->element_number = (element_array + loop)->element_number;
+ (element_array + loop)->element_number = temp;
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->laoss = &laoss;
+ (ts+loop)->element_array = element_array + (loop*number_elements_per_thread);
+ (ts+loop)->number_elements_per_thread = number_elements_per_thread;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_ordered_thread, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ free( ts );
+
+ /* TRD : validate the resultant list
+ iterate over the list
+ we expect to find the list is sorted,
+ which means that element_number will
+ increment from zero
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_total;
+
+ lfds700_list_aos_query( &laoss, LFDS700_LIST_AOS_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ {
+ expected_element_number = 0;
+
+ // TRD : traverse the list and check that element_number increments from zero
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and LFDS700_LIST_AOS_GET_START_AND_THEN_NEXT(laoss, laose) )
+ {
+ element = LFDS700_LIST_AOS_GET_VALUE_FROM_ELEMENT( *laose );
+
+ if( element->element_number != expected_element_number++ )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+ }
+
+ lfds700_list_aos_cleanup( &laoss, NULL );
+
+ util_aligned_free( element_array );
+
+ internal_display_test_result( 1, "list_aos", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int new_ordered_compare_function( void const *value_new, void const *value_in_list )
+{
+ int
+ cr = 0;
+
+ struct test_element
+ *e1,
+ *e2;
+
+ // TRD : value_new can be any value in its range
+ // TRD : value_in_list can be any value in its range
+
+ e1 = (struct test_element *) value_new;
+ e2 = (struct test_element *) value_in_list;
+
+ if( e1->element_number < e2->element_number )
+ cr = -1;
+
+ if( e1->element_number > e2->element_number )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_ordered_thread( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements_per_thread ; loop++ )
+ {
+ LFDS700_LIST_AOS_SET_KEY_IN_ELEMENT( (ts->element_array+loop)->laose, ts->element_array+loop );
+ LFDS700_LIST_AOS_SET_VALUE_IN_ELEMENT( (ts->element_array+loop)->laose, ts->element_array+loop );
+ lfds700_list_aos_insert( ts->laoss, &(ts->element_array+loop)->laose, NULL, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_list_aos_element
+ laose;
+
+ lfds700_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ lfds700_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds700_list_aos_state
+ *laoss;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int new_ordered_with_cursor_compare_function( void const *value_new, void const *value_in_list );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_ordered_with_cursor_insert_thread( void *util_thread_starter_thread_state );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_ordered_with_cursor_cursor_thread( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_list_aos_new_ordered_with_cursor( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ offset,
+ temp;
+
+ struct lfds700_list_aos_state
+ laoss;
+
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *element_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : run two threads per logical processor
+
+ the test runs for 10 seconds
+
+ the first thread loops over a pre-set list of random numbers
+ continually adding them using ordered insert
+
+ the second thread keeps iterating over the list, checking that
+ each element is larger than its predecessor
+ */
+
+ internal_display_test_name( "New ordered with cursor (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_list_aos_init_valid_on_current_logical_core( &laoss, new_ordered_with_cursor_compare_function, LFDS700_LIST_AOS_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+
+ /* TRD : create randomly ordered number array with unique elements
+
+ unique isn't necessary - the list will sort anyway - but
+ it permits slightly better validation
+ */
+
+ number_elements_per_thread = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ // TRD : or the test takes a looooooong time...
+ if( number_elements_per_thread > 1000 )
+ number_elements_per_thread = 1000;
+
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_total, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ (element_array+loop)->element_number = loop;
+
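+ // TRD : shuffle - swap each element in turn with an element at a random offset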
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ offset = LFDS700_MISC_PRNG_GENERATE( &ps );
+ offset %= number_elements_total;
+ temp = (element_array + offset)->element_number;
+ (element_array + offset)->element_number = (element_array + loop)->element_number;
+ (element_array + loop)->element_number = temp;
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors * 2 );
+
+ // TRD : the insert threads
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->laoss = &laoss;
+ (ts+loop)->element_array = element_array + number_elements_per_thread*loop;
+ (ts+loop)->error_flag = LOWERED;
+ (ts+loop)->number_elements_per_thread = number_elements_per_thread;
+ }
+
+ // TRD : the cursor threads
+ for( loop = number_logical_processors ; loop < number_logical_processors * 2 ; loop++ )
+ {
+ (ts+loop)->laoss = &laoss;
+ (ts+loop)->element_array = NULL;
+ (ts+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors * 2 );
+
+ util_thread_starter_new( &tts, number_logical_processors * 2 );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_ordered_with_cursor_insert_thread, ts+loop );
+ util_thread_starter_start( tts, &thread_handles[loop+number_logical_processors], loop+number_logical_processors, lp, new_ordered_with_cursor_cursor_thread, ts+loop+number_logical_processors );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors * 2 ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ /* TRD : validate the resultant list
+
+ the cursor threads were checking for orderedness
+ if that failed, they raise their error_flag
+ so validate the list, then check error_flags
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_total;
+
+ lfds700_list_aos_query( &laoss, LFDS700_LIST_AOS_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ if( dvs == LFDS700_MISC_VALIDITY_VALID )
+ for( loop = number_logical_processors ; loop < number_logical_processors * 2 ; loop++ )
+ if( (ts+loop)->error_flag == RAISED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ORDER;
+
+ lfds700_list_aos_cleanup( &laoss, NULL );
+
+ util_aligned_free( element_array );
+
+ free( ts );
+
+ internal_display_test_result( 1, "list_aos", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int new_ordered_with_cursor_compare_function( void const *value_new, void const *value_in_list )
+{
+ int
+ cr = 0;
+
+ struct test_element
+ *e1,
+ *e2;
+
+ // TRD : value_new can be any value in its range
+ // TRD : value_in_list can be any value in its range
+
+ e1 = (struct test_element *) value_new;
+ e2 = (struct test_element *) value_in_list;
+
+ if( e1->element_number < e2->element_number )
+ cr = -1;
+
+ if( e1->element_number > e2->element_number )
+ cr = 1;
+
+ return( cr );
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_ordered_with_cursor_insert_thread( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements_per_thread ; loop++ )
+ {
+ LFDS700_LIST_AOS_SET_KEY_IN_ELEMENT( (ts->element_array+loop)->laose, ts->element_array+loop );
+ LFDS700_LIST_AOS_SET_VALUE_IN_ELEMENT( (ts->element_array+loop)->laose, ts->element_array+loop );
+ lfds700_list_aos_insert( ts->laoss, &(ts->element_array+loop)->laose, NULL, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_ordered_with_cursor_cursor_thread( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ prev_element_number;
+
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_list_aos_element
+ *laose;
+
+ struct test_element
+ *element;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ prev_element_number = 0;
+
+ laose = LFDS700_LIST_AOS_GET_START( *ts->laoss );
+
+ // TRD : we may get start before any element has been added to the list
+ if( laose == NULL )
+ continue;
+
+ element = LFDS700_LIST_AOS_GET_VALUE_FROM_ELEMENT( *laose );
+
+ if( element->element_number < prev_element_number )
+ ts->error_flag = RAISED;
+
+ prev_element_number = element->element_number;
+
+ laose = LFDS700_LIST_AOS_GET_NEXT( *laose );
+
+ while( laose != NULL )
+ {
+ element = LFDS700_LIST_AOS_GET_VALUE_FROM_ELEMENT( *laose );
+
+ if( element->element_number <= prev_element_number )
+ ts->error_flag = RAISED;
+
+ prev_element_number = element->element_number;
+
+ laose = LFDS700_LIST_AOS_GET_NEXT( *laose );
+ }
+
+ if( time_loop++ == REDUCED_TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_list_asu( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_CAS )
+ {
+ printf( "\n"
+ "List (add-only, singly-linked, unordered) Tests\n"
+ "===============================================\n" );
+
+ test_lfds700_list_asu_alignment();
+ test_lfds700_list_asu_new_start( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_list_asu_new_end( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_list_asu_new_after( list_of_logical_processors, memory_in_megabytes );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_list_asu_alignment()
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ internal_display_test_name( "Alignment" );
+
+
+
+ // TRD : struct lfds700_list_asu_element
+ if( offsetof(struct lfds700_list_asu_element,next) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_asu_element,value) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_asu_element,key) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds700_list_asu_state
+ if( offsetof(struct lfds700_list_asu_state,end) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_asu_state,start) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_asu_state,dummy_element) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_list_asu_state,key_compare_function) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "list_asu", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_list_asu_element
+ lasue;
+
+ lfds700_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements;
+
+ struct lfds700_list_asu_state
+ *lasus;
+
+ struct test_element
+ *element_array;
+
+ struct lfds700_list_asu_element
+ *first_element;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_after_thread( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_list_asu_new_after( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue,
+ first_element;
+
+ struct lfds700_list_asu_state
+ lasus;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : run one thread per logical processor
+ run for 250k elements
+ we put a single first element into the list and
+ each thread loops, calling lfds700_list_asu_insert_at_position( LFDS700_LIST_ASU_POSITION_AFTER ),
+ inserting after the single first element
+ data elements contain a thread_number and an element_number
+ verification should show element_number decreasing on a per thread basis
+ */
+
+ internal_display_test_name( "New after" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_init_valid_on_current_logical_core( &lasus, NULL, NULL );
+
+ LFDS700_LIST_ASU_SET_KEY_IN_ELEMENT( first_element, NULL );
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( first_element, NULL );
+ lfds700_list_asu_insert_at_position( &lasus, &first_element, NULL, LFDS700_LIST_ASU_POSITION_START, &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_logical_processors * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ (element_array+(loop*number_elements)+subloop)->thread_number = loop;
+ (element_array+(loop*number_elements)+subloop)->element_number = subloop;
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->lasus = &lasus;
+ (ts+loop)->element_array = element_array + (loop*number_elements);
+ (ts+loop)->first_element = &first_element;
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_after_thread, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ free( ts );
+
+ /* TRD : validate the resultant list
+ iterate over each element
+ we expect to find element numbers decrement on a per thread basis
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors + 1;
+
+ lfds700_list_asu_query( &lasus, LFDS700_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements - 1;
+
+ /* TRD : we have a leading element, after which all inserts occurred
+ we need to get past that element for validation
+ this is why we're not using lfds700_list_asu_get_start_and_then_next()
+ */
+
+ lasue = LFDS700_LIST_ASU_GET_START( lasus );
+
+ lasue = LFDS700_LIST_ASU_GET_NEXT( *lasue );
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lasue != NULL )
+ {
+ element = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( element->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( element->element_number < per_thread_counters[element->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element->element_number > per_thread_counters[element->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( element->element_number == per_thread_counters[element->thread_number] )
+ per_thread_counters[element->thread_number]--;
+
+ lasue = LFDS700_LIST_ASU_GET_NEXT( *lasue );
+ }
+
+ free( per_thread_counters );
+
+ lfds700_list_asu_cleanup( &lasus, NULL );
+
+ util_aligned_free( element_array );
+
+ internal_display_test_result( 1, "list_asu", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_after_thread( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ LFDS700_LIST_ASU_SET_KEY_IN_ELEMENT( (ts->element_array+loop)->lasue, ts->element_array+loop );
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( (ts->element_array+loop)->lasue, ts->element_array+loop );
+ lfds700_list_asu_insert_at_position( ts->lasus, &(ts->element_array+loop)->lasue, ts->first_element, LFDS700_LIST_ASU_POSITION_AFTER, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_list_asu_element
+ lasue;
+
+ lfds700_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements;
+
+ struct lfds700_list_asu_state
+ *lasus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_end_thread( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_list_asu_new_end( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ struct lfds700_list_asu_state
+ lasus;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : run one thread per logical processor
+ run for 250k elements
+ each thread loops, calling lfds700_list_asu_insert_at_position( LFDS700_LIST_ASU_POSITION_END )
+ data elements contain a thread_number and an element_number
+ verification should show element_number increasing on a per thread basis
+ */
+
+ internal_display_test_name( "New end" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_list_asu_init_valid_on_current_logical_core( &lasus, NULL, NULL );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_logical_processors * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ (element_array+(loop*number_elements)+subloop)->thread_number = loop;
+ (element_array+(loop*number_elements)+subloop)->element_number = subloop;
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->lasus = &lasus;
+ (ts+loop)->element_array = element_array + (loop*number_elements);
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_end_thread, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ free( ts );
+
+ /* TRD : validate the resultant list
+ iterate over each element
+ we expect to find element numbers increment on a per thread basis
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors;
+
+ lfds700_list_asu_query( &lasus, LFDS700_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ lasue = NULL;
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, lasue) )
+ {
+ element = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( element->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( element->element_number > per_thread_counters[element->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element->element_number < per_thread_counters[element->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( element->element_number == per_thread_counters[element->thread_number] )
+ per_thread_counters[element->thread_number]++;
+ }
+
+ free( per_thread_counters );
+
+ lfds700_list_asu_cleanup( &lasus, NULL );
+
+ util_aligned_free( element_array );
+
+ internal_display_test_result( 1, "list_asu", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_end_thread( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ LFDS700_LIST_ASU_SET_KEY_IN_ELEMENT( (ts->element_array+loop)->lasue, ts->element_array+loop );
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( (ts->element_array+loop)->lasue, ts->element_array+loop );
+ lfds700_list_asu_insert_at_position( ts->lasus, &(ts->element_array+loop)->lasue, NULL, LFDS700_LIST_ASU_POSITION_END, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds700_list_asu_element
+ lasue;
+
+ lfds700_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements;
+
+ struct lfds700_list_asu_state
+ *lasus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_start_thread( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_list_asu_new_start( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue = NULL;
+
+ struct lfds700_list_asu_state
+ lasus;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_state
+ *ts;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : run one thread per logical processor
+ run for 250k elements
+ each thread loops, calling lfds700_list_asu_insert_at_position( LFDS700_LIST_ASU_POSITION_START )
+ data elements contain a thread_number and an element_number
+ verification should show element_number decreasing on a per thread basis
+ */
+
+ internal_display_test_name( "New start" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_list_asu_init_valid_on_current_logical_core( &lasus, NULL, NULL );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ element_array = util_aligned_malloc( sizeof(struct test_element) * number_logical_processors * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ (element_array+(loop*number_elements)+subloop)->thread_number = loop;
+ (element_array+(loop*number_elements)+subloop)->element_number = subloop;
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->lasus = &lasus;
+ (ts+loop)->element_array = element_array + (loop*number_elements);
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, new_start_thread, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ free( ts );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ /* TRD : validate the resultant list
+ iterate over each element
+ we expect to find element numbers decrement on a per thread basis
+ */
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors;
+
+ lfds700_list_asu_query( &lasus, LFDS700_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements - 1;
+
+ lasue = NULL;
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, lasue) )
+ {
+ element = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( element->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( element->element_number < per_thread_counters[element->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element->element_number > per_thread_counters[element->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( element->element_number == per_thread_counters[element->thread_number] )
+ per_thread_counters[element->thread_number]--;
+ }
+
+ free( per_thread_counters );
+
+ lfds700_list_asu_cleanup( &lasus, NULL );
+
+ util_aligned_free( element_array );
+
+ internal_display_test_result( 1, "list_asu", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION new_start_thread( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ LFDS700_LIST_ASU_SET_KEY_IN_ELEMENT( (ts->element_array+loop)->lasue, ts->element_array+loop );
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( (ts->element_array+loop)->lasue, ts->element_array+loop );
+ lfds700_list_asu_insert_at_position( ts->lasus, &(ts->element_array+loop)->lasue, NULL, LFDS700_LIST_ASU_POSITION_START, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_pal_atomic( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ printf( "\n"
+ "Abstraction Atomic Tests\n"
+ "========================\n" );
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_CAS )
+ test_lfds700_pal_atomic_cas( list_of_logical_processors );
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_DWCAS )
+ test_lfds700_pal_atomic_dwcas( list_of_logical_processors );
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_EXCHANGE )
+ test_lfds700_pal_atomic_exchange( list_of_logical_processors, memory_in_megabytes );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_lfds700_pal_atomic_cas_state
+{
+ lfds700_pal_uint_t
+ local_counter;
+
+ lfds700_pal_atom_t volatile
+ *shared_counter;
+};
+
+ /***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_cas( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_pal_atomic_cas( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ lfds700_pal_atom_t volatile LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ shared_counter;
+
+ lfds700_pal_uint_t
+ local_total = 0;
+
+ lfds700_pal_uint_t
+ loop,
+ number_logical_processors;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_lfds700_pal_atomic_cas_state
+ *atcs;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+
+ /* TRD : here we test pal_cas
+
+ we run one thread per CPU
+ we use pal_cas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ each thread runs for ten million successful increments
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ internal_display_test_name( "Atomic CAS" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ shared_counter = 0;
+
+ atcs = util_malloc_wrapper( sizeof(struct test_lfds700_pal_atomic_cas_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (atcs+loop)->shared_counter = &shared_counter;
+ (atcs+loop)->local_counter = 0;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_cas, atcs+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ // TRD : results
+ LFDS700_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ local_total += (atcs+loop)->local_counter;
+
+ if( local_total == shared_counter )
+ puts( "passed" );
+
+ if( local_total != shared_counter )
+ {
+ puts( "failed" );
+ exit( EXIT_FAILURE );
+ }
+
+ // TRD : cleanup
+ free( atcs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_cas( void *util_thread_starter_thread_state )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ loop = 0;
+
+ lfds700_pal_atom_t LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_SINGLE_POINTER)
+ exchange,
+ compare;
+
+ struct test_lfds700_pal_atomic_cas_state
+ *atcs;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ atcs = (struct test_lfds700_pal_atomic_cas_state *) tsts->thread_user_state;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( loop++ < 10000000 )
+ {
+ compare = *atcs->shared_counter;
+
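+ // TRD : the CAS macro is expected to update compare with the value observed in memory
+ // on failure, so each retry of the loop below starts from fresh state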
+ do
+ {
+ exchange = compare + 1;
+ LFDS700_PAL_ATOMIC_CAS( atcs->shared_counter, &compare, exchange, LFDS700_MISC_CAS_STRENGTH_WEAK, result );
+ }
+ while( result == 0 );
+
+ atcs->local_counter++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_lfds700_pal_atomic_dwcas_state
+{
+ lfds700_pal_uint_t
+ local_counter;
+
+ lfds700_pal_atom_t volatile
+ (*shared_counter)[2];
+};
+
+ /***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_dwcas( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_pal_atomic_dwcas( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ lfds700_pal_uint_t
+ local_total = 0,
+ loop,
+ number_logical_processors;
+
+ lfds700_pal_atom_t volatile LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ shared_counter[2] = { 0, 0 };
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_lfds700_pal_atomic_dwcas_state
+ *atds;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+
+ /* TRD : here we test pal_dwcas
+
+ we run one thread per CPU
+ we use pal_dwcas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ each thread runs for ten million successful increments
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ internal_display_test_name( "Atomic DWCAS" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ atds = util_malloc_wrapper( sizeof(struct test_lfds700_pal_atomic_dwcas_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (atds+loop)->shared_counter = &shared_counter;
+ (atds+loop)->local_counter = 0;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_dwcas, atds+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ // TRD : results
+ LFDS700_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ local_total += (atds+loop)->local_counter;
+
+ if( local_total == shared_counter[0] )
+ puts( "passed" );
+
+ if( local_total != shared_counter[0] )
+ {
+ printf( "%llu != %llu\n", (int long long unsigned) local_total, (int long long unsigned) shared_counter[0] );
+ puts( "failed" );
+ exit( EXIT_FAILURE );
+ }
+
+ // TRD : cleanup
+ free( atds );
+
+ return;
+}
+
+#pragma warning( disable : 4702 )
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_dwcas( void *util_thread_starter_thread_state )
+{
+ char unsigned
+ result;
+
+ lfds700_pal_uint_t
+ loop = 0;
+
+ lfds700_pal_atom_t LFDS700_PAL_ALIGN(LFDS700_PAL_ALIGN_DOUBLE_POINTER)
+ exchange[2],
+ compare[2];
+
+ struct test_lfds700_pal_atomic_dwcas_state
+ *atds;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ atds = (struct test_lfds700_pal_atomic_dwcas_state *) tsts->thread_user_state;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( loop++ < 10000000 )
+ {
+ compare[0] = (*atds->shared_counter)[0];
+ compare[1] = (*atds->shared_counter)[1];
+
+ do
+ {
+ exchange[0] = compare[0] + 1;
+ exchange[1] = compare[1];
+ LFDS700_PAL_ATOMIC_DWCAS( atds->shared_counter, compare, exchange, LFDS700_MISC_CAS_STRENGTH_WEAK, result );
+ }
+ while( result == 0 );
+
+ atds->local_counter++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ lfds700_pal_uint_t
+ counter,
+ *counter_array,
+ number_elements,
+ number_logical_processors;
+
+ lfds700_pal_uint_t volatile
+ *shared_exchange;
+};
+
+ /***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_exchange( void *util_thread_starter_thread_state );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_atomic_exchange( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_pal_atomic_exchange( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum flag
+ atomic_exchange_success_flag = RAISED,
+ exchange_success_flag = RAISED;
+
+ lfds700_pal_uint_t
+ loop,
+ *merged_counter_arrays,
+ number_elements,
+ number_logical_processors,
+ subloop;
+
+ lfds700_pal_uint_t volatile LFDS700_PAL_ALIGN(LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ exchange;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct test_state
+ *ts;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : here we test pal_atomic_exchange
+
+ we have one thread per logical core
+ there is one variable which every thread will exchange to/from
+ we know the number of logical cores
+ the threads have a counter each, which begins with their logical core number plus one
+ (plus one because the exchange counter begins with 0 already in place)
+ (e.g. thread 0 begins with its counter at 1, thread 1 begins with its counter at 2, etc)
+
+ there is an array per thread of number_elements counters, each set to 0
+
+ when running, each thread increments its counter by the number of threads
+ the threads busy loop, exchanging
+ every time a thread pulls a number off the central, shared exchange variable,
+ it increments the counter for that variable in its thread-local counter array
+
+ (we're not using a global array, because we'd have to be atomic in our increments,
+ which is a slow-down we don't want)
+
+ at the end, we merge all the counter arrays and if the frequency for a counter is a value
+ other than 1, the exchange was not atomic
+
+ we perform the test twice, once with pal_atomic_exchange, once with a non-atomic exchange
+
+ we expect the atomic to pass and the non-atomic to fail
+ */
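+
+ /* TRD : for illustration only - a hedged sketch of one possible shape for the
+ non-atomic worker loop (the real worker threads are defined further below);
+ the struct test_state member names are real, "previous" and the loop bound
+ are assumptions of the sketch
+
+ while( ts->counter < ts->number_elements )
+ {
+ previous = *ts->shared_exchange; // non-atomic read...
+ *ts->shared_exchange = ts->counter; // ...and write - the pair is not atomic, so values can be lost or duplicated
+ ts->counter_array[previous]++;
+ ts->counter += ts->number_logical_processors;
+ }
+
+ the atomic variant replaces the read/write pair with the platform exchange macro
+ */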
+
+ internal_display_test_name( "Atomic exchange" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(lfds700_pal_uint_t) * (number_logical_processors + 1) );
+
+ merged_counter_arrays = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_elements );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ *(merged_counter_arrays+loop) = 0;
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->counter = loop + 1;
+ (ts+loop)->counter_array = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_elements );
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ *((ts+loop)->counter_array+subloop) = 0;
+ (ts+loop)->number_logical_processors = number_logical_processors;
+ (ts+loop)->shared_exchange = &exchange;
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ exchange = 0;
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ // TRD : non-atomic
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_exchange, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *(merged_counter_arrays+loop) += *( (ts+subloop)->counter_array+loop );
+
+ /* TRD : the worker threads exit when their per-thread counter reaches number_elements
+ as such the final number_logical_processors values are never read back
+ we could change the threads to exit when the number they read exceeds number_elements
+ but then we'd need an if() in their work-loop,
+ and we need to go as fast as possible
+ */
+
+ for( loop = 0 ; loop < number_elements - number_logical_processors ; loop++ )
+ if( *(merged_counter_arrays+loop) != 1 )
+ exchange_success_flag = LOWERED;
+
+ // TRD : now for atomic exchange - we need to re-init the data structures
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ *(merged_counter_arrays+loop) = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ *((ts+loop)->counter_array+subloop) = 0;
+
+ exchange = 0;
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_atomic_exchange, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *(merged_counter_arrays+loop) += *( (ts+subloop)->counter_array+loop );
+
+ for( loop = 0 ; loop < number_elements - number_logical_processors ; loop++ )
+ if( *(merged_counter_arrays+loop) != 1 )
+ atomic_exchange_success_flag = LOWERED;
+
+ // TRD : cleanup
+ free( merged_counter_arrays );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ free( (ts+loop)->counter_array );
+
+ util_thread_starter_delete( tts );
+ free( thread_handles );
+ free( ts );
+
+ /* TRD : results
+
+ on a single core, atomic and non-atomic exchange should both work
+
+ if we find our non-atomic test passes, then we can't really say anything
+ about whether or not the atomic test is really working
+ */
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ if( number_logical_processors == 1 )
+ {
+ if( exchange_success_flag == RAISED and atomic_exchange_success_flag == RAISED )
+ puts( "passed" );
+
+ if( exchange_success_flag != RAISED or atomic_exchange_success_flag != RAISED )
+ puts( "failed (atomic and non-atomic both failed)" );
+ }
+
+ if( number_logical_processors >= 2 )
+ {
+ if( atomic_exchange_success_flag == RAISED and exchange_success_flag == LOWERED )
+ puts( "passed" );
+
+ if( atomic_exchange_success_flag == RAISED and exchange_success_flag == RAISED )
+ puts( "indeterminate (atomic and non-atomic both passed)" );
+
+ if( atomic_exchange_success_flag == LOWERED )
+ {
+ puts( "failed (atomic failed)" );
+ exit( EXIT_FAILURE );
+ }
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_exchange( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ local_counter,
+ exchange;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ local_counter = ts->counter;
+
+ while( local_counter < ts->number_elements )
+ {
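+ /* note : this read-then-write pair is deliberately non-atomic;
+ another thread can read or write the shared variable between the two statements,
+ which is what the non-atomic pass of the test is expected to reveal
+ */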
+ exchange = *ts->shared_exchange;
+ *ts->shared_exchange = local_counter;
+
+ ( *(ts->counter_array + exchange) )++;
+
+ local_counter += ts->number_logical_processors;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_atomic_exchange( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ local_counter,
+ exchange;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ local_counter = ts->counter;
+
+ while( local_counter < ts->number_elements )
+ {
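+ /* note : the atomic exchange swaps our counter into the shared variable and hands back,
+ in exchange, whatever value was there before, in one indivisible operation,
+ so every value placed into the shared variable is picked up by exactly one thread
+ */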
+ exchange = local_counter;
+
+ LFDS700_PAL_ATOMIC_EXCHANGE( ts->shared_exchange, &exchange );
+
+ ( *(ts->counter_array + exchange) )++;
+
+ local_counter += ts->number_logical_processors;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning C4127 (conditional expression is constant)
+
+void test_lfds700_queue( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ printf( "\n"
+ "Queue Tests\n"
+ "===========\n" );
+
+ test_lfds700_queue_alignment();
+ test_lfds700_queue_enqueuing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_queue_dequeuing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_queue_enqueuing_and_dequeuing( list_of_logical_processors );
+ test_lfds700_queue_rapid_enqueuing_and_dequeuing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_queue_enqueuing_and_dequeuing_with_free( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free( list_of_logical_processors );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning C4127 (conditional expression is constant)
+
+void test_lfds700_queue_alignment( void )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ internal_display_test_name( "Alignment" );
+
+
+
+ // TRD : struct lfds700_queue_element
+ if( offsetof(struct lfds700_queue_element,next) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_queue_element,key) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds700_queue_state
+ if( offsetof(struct lfds700_queue_state,enqueue) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_queue_state,dequeue) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_queue_state,user_state) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_bss( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ assert( list_of_logical_processors != NULL );
+
+ printf( "\n"
+ "Queue (bounded, single consumer, single producer) Tests\n"
+ "=======================================================\n" );
+
+ // TRD : no alignment checks are required for queue_bss
+ test_lfds700_queue_bss_enqueuing();
+ test_lfds700_queue_bss_dequeuing();
+ test_lfds700_queue_bss_enqueuing_and_dequeuing( list_of_logical_processors );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_bss_dequeuing( void )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_queue_bss_element
+ element_array[128];
+
+ struct lfds700_queue_bss_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ void
+ *value;
+
+ /* TRD : create an empty queue
+ enqueue 127 elements (the queue stores one fewer element than the count passed to init)
+ then dequeue the elements, in the same thread - we're API testing
+ it's a single producer, single consumer queue, so enqueuing and dequeuing
+ in the one current thread is safe
+ */
+
+ internal_display_test_name( "Dequeuing" );
+
+ lfds700_queue_bss_init_valid_on_current_logical_core( &qs, element_array, 128, NULL );
+
+ for( loop = 0 ; loop < 127 ; loop++ )
+ lfds700_queue_bss_enqueue( &qs, NULL, (void *) loop );
+
+ for( loop = 0 ; loop < 127 ; loop++ )
+ {
+ lfds700_queue_bss_dequeue( &qs, NULL, &value );
+ if( (lfds700_pal_uint_t) value != loop )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ vi.min_elements = vi.max_elements = 0;
+
+ lfds700_queue_bss_query( &qs, LFDS700_QUEUE_BSS_QUERY_VALIDATE, &vi, &dvs );
+
+ lfds700_queue_bss_cleanup( &qs, NULL );
+
+ internal_display_test_result( 1, "queue_bss", dvs );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_bss_enqueuing( void )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ int
+ rv;
+
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_queue_bss_element
+ element_array[128];
+
+ struct lfds700_queue_bss_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ /* TRD : create an empty queue
+ enqueue 127 elements - the queue stores one fewer element than the count passed to init,
+ so the 128th enqueue is expected to fail
+ it's a single producer queue, so we just do this in our current thread
+ it's an API test
+ */
+
+ internal_display_test_name( "Enqueuing" );
+
+ lfds700_queue_bss_init_valid_on_current_logical_core( &qs, element_array, 128, NULL );
+
+ for( loop = 0 ; loop < 127 ; loop++ )
+ if( 1 != lfds700_queue_bss_enqueue(&qs, NULL, (void *) loop) )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : at this point enqueuing one more should return 0
+ rv = lfds700_queue_bss_enqueue( &qs, NULL, (void *) loop );
+
+ if( rv != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
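+ // note : 127 enqueues succeeded and the 128th failed, so validation expects exactly 127 elements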
+ vi.min_elements = vi.max_elements = 127;
+
+ lfds700_queue_bss_query( &qs, LFDS700_QUEUE_BSS_QUERY_VALIDATE, &vi, &dvs );
+
+ lfds700_queue_bss_cleanup( &qs, NULL );
+
+ internal_display_test_result( 1, "queue_bss", dvs );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ struct lfds700_queue_bss_state
+ *qs;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueuer( void *util_thread_starter_thread_state );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_dequeuer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_bss_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_logical_processors,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_queue_bss_element
+ element_array[4];
+
+ struct lfds700_queue_bss_state
+ qs;
+
+ struct test_pal_logical_processor
+ *lp,
+ *lp_first;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+
+ /* TRD : so, this is the real test
+ problem is, because we use memory barriers only
+ and we only support one producer and one consumer
+ we need to ensure these threads are on different physical cores
+ if they're on the same core, the code would work even without memory barriers
+
+ problem is, in the test application, we only know the *number* of logical cores
+ obtaining topology information adds a great deal of complexity to the test app
+ and makes porting much harder
+
+ so, we know how many logical cores there are; my thought is to partially
+ permutate over them - we always run the producer on core 0, but we iterate
+ over the other logical cores, running the test once each time, with the
+ consumer being run on core 0, then core 1, then core 2, etc
+
+ (we run on core 0 for the single-cpu case; it's redundant, since a single
+ logical core running both producer and consumer will work, but otherwise
+ we have to skip the test, which is confusing for the user)
+
+ the test is one thread enqueuing and one thread dequeuing for two seconds
+ */
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ internal_display_test_name( "Enqueuing and dequeuing (%d seconds)", number_logical_processors * 2 );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * 2 );
+
+ for( loop = 0 ; loop < 2 ; loop++ )
+ {
+ (ts+loop)->qs = &qs;
+ (ts+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * 2 );
+
+ /* TRD : producer always on core 0
+ iterate over the other cores with consumer
+ */
+
+ lasue = LFDS700_LIST_ASU_GET_START( *list_of_logical_processors );
+ lp_first = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ while( lasue != NULL )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lfds700_queue_bss_init_valid_on_current_logical_core( &qs, element_array, 4, NULL );
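+ // note : the element array is deliberately tiny, which it appears is to make the queue wrap and hit its full/empty boundaries many times during the two second run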
+
+ util_thread_starter_new( &tts, 2 );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ util_thread_starter_start( tts, &thread_handles[0], 0, lp_first, thread_enqueuer, ts );
+ util_thread_starter_start( tts, &thread_handles[1], 1, lp, thread_dequeuer, ts+1 );
+
+ util_thread_starter_run( tts );
+
+ for( subloop = 0 ; subloop < 2 ; subloop++ )
+ test_pal_thread_wait( thread_handles[subloop] );
+
+ util_thread_starter_delete( tts );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lfds700_queue_bss_cleanup( &qs, NULL );
+
+ lasue = LFDS700_LIST_ASU_GET_NEXT( *lasue );
+ }
+
+ if( (ts+1)->error_flag == RAISED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ free( thread_handles );
+
+ free( ts );
+
+ internal_display_test_result( 1, "queue_bss", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueuer( void *util_thread_starter_thread_state )
+{
+ int
+ rv;
+
+ lfds700_pal_uint_t
+ datum = 0,
+ time_loop = 0;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + 2 )
+ {
+ rv = lfds700_queue_bss_enqueue( ts->qs, NULL, (void *) datum );
+
+ if( rv == 1 )
+ if( ++datum == 4 )
+ datum = 0;
+
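+ /* note : time() is comparatively expensive, so it appears to be called
+ only once every TIME_LOOP_COUNT iterations rather than on every loop
+ */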
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_dequeuer( void *util_thread_starter_thread_state )
+{
+ int
+ rv;
+
+ lfds700_pal_uint_t
+ datum,
+ expected_datum = 0,
+ time_loop = 0;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + 2 )
+ {
+ rv = lfds700_queue_bss_dequeue( ts->qs, NULL, (void *) &datum );
+
+ if( rv == 1 )
+ {
+ if( datum != expected_datum )
+ ts->error_flag = RAISED;
+
+ if( ++expected_datum == 4 )
+ expected_datum = 0;
+ }
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ struct lfds700_queue_state
+ *qs;
+};
+
+struct test_element
+{
+ struct lfds700_queue_element
+ qe;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_dequeuer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_with_dummy_element,
+ number_elements_without_dummy_element,
+ number_logical_processors;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi = { 0, 0 };
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : create a queue, add 1,000,000 elements
+
+ use a single thread to enqueue every element
+ each elements user data is an incrementing counter
+
+ then run one thread per CPU
+ where each busy-works dequeuing
+
+ when an element is dequeued, we check (on a per-thread basis) the
+ value dequeued is greater than the element previously dequeued
+
+ note we have no variation in the test for CAS+GC vs DWCAS
+ this is because all we do is dequeue
+ what we actually want to stress test is the queue
+ not CAS
+ so it's better to let the dequeue run as fast as possible
+ */
+
+ internal_display_test_name( "Dequeuing" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);
+ number_elements_without_dummy_element = number_elements_with_dummy_element - 1;
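+ // note : the queue always holds one dummy element, so one element of the array is handed to init and never carries user data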
+
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_queue_init_valid_on_current_logical_core( &qs, &(te_array + number_elements_without_dummy_element)->qe, &ps, NULL );
+
+ for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
+ {
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, loop );
+ lfds700_queue_enqueue( &qs, &(te_array+loop)->qe, &ps );
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->qs = &qs;
+ (ts+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_dequeuer, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ // TRD : check queue is empty
+ lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (ts+loop)->error_flag == RAISED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ free( ts );
+
+ util_aligned_free( te_array );
+
+ lfds700_queue_cleanup( &qs, NULL );
+
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_dequeuer( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ *prev_value,
+ *value;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
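+ // note : dequeue one element before the threads are released, to obtain an initial prev_value for the ordering check below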
+ lfds700_queue_dequeue( ts->qs, &qe, &ps );
+ prev_value = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( lfds700_queue_dequeue(ts->qs, &qe, &ps) )
+ {
+ value = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( value <= prev_value )
+ ts->error_flag = RAISED;
+
+ prev_value = value;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements,
+ thread_number;
+
+ struct lfds700_queue_state
+ *qs;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds700_queue_element
+ qe;
+
+ lfds700_pal_uint_t
+ counter,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_enqueuer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_enqueuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ *per_thread_counters,
+ loop,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ dummy_qe,
+ *qe;
+
+ struct lfds700_queue_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : create an empty queue
+ then run one thread per CPU
+ where each thread busy-works, enqueuing elements from its own pre-allocated array (one array per thread)
+ until number_elements elements are enqueued, per thread
+ each element's void pointer of user data is a struct containing thread number and element number
+ where element_number is a thread-local counter starting at 0
+
+ when we're done, we check that all the elements are present
+ and increment on a per-thread basis
+ */
+
+ internal_display_test_name( "Enqueuing" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ lfds700_queue_init_valid_on_current_logical_core( &qs, &dummy_qe, &ps, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->qs = &qs;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_enqueuer, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ /* TRD : first, validate the queue
+
+ then dequeue
+ we expect to find element numbers increment on a per thread basis
+ */
+
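+ // note : every thread enqueued number_elements elements and nothing was dequeued, so the element count must be exact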
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors;
+
+ lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_queue_dequeue(&qs, &qe, &ps) )
+ {
+ te = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( te->counter > per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->counter < per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->counter == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]++;
+ }
+
+ free( per_thread_counters );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ util_aligned_free( (ts+loop)->te_array );
+
+ free( ts );
+
+ lfds700_queue_cleanup( &qs, NULL );
+
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_enqueuer( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ ts->te_array = util_aligned_malloc( sizeof(struct test_element) * ts->number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ (ts->te_array+loop)->thread_number = ts->thread_number;
+ (ts->te_array+loop)->counter = loop;
+ }
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( (ts->te_array+loop)->qe, ts->te_array+loop );
+ lfds700_queue_enqueue( ts->qs, &(ts->te_array+loop)->qe, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ lfds700_pal_uint_t
+ counter,
+ number_logical_processors,
+ *per_thread_counters,
+ thread_number;
+
+ struct lfds700_queue_state
+ *qs;
+};
+
+struct test_element
+{
+ struct lfds700_queue_element
+ qe,
+ *qe_use;
+
+ lfds700_pal_uint_t
+ counter,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueuer_and_dequeuer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_logical_processors,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : qt can be any value in its range
+
+ /* TRD : create a queue with one element per thread
+ each thread constantly dequeues and enqueues from that one queue
+ where when enqueuing sets in the element
+ its thread number and counter
+ and when dequeuing, checks the thread number and counter
+ against previously seen counter for that thread
+ where it should always see a higher number
+ */
+
+ internal_display_test_name( "Enqueuing and dequeuing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ te_array = util_aligned_malloc( sizeof(struct test_element) * (number_logical_processors+1), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_queue_init_valid_on_current_logical_core( &qs, &(te_array+number_logical_processors)->qe, &ps, NULL );
+
+ // TRD : we assume the test will iterate at least once (or we'll have a false negative)
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->thread_number = loop;
+ (te_array+loop)->counter = 0;
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, te_array+loop );
+ lfds700_queue_enqueue( &qs, &(te_array+loop)->qe, &ps );
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->qs = &qs;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->counter = 0;
+ (ts+loop)->error_flag = LOWERED;
+ (ts+loop)->per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+ (ts+loop)->number_logical_processors = number_logical_processors;
+
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *((ts+loop)->per_thread_counters+subloop) = 0;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_enqueuer_and_dequeuer, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_logical_processors;
+
+ lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (ts+loop)->error_flag == RAISED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ free( (ts+loop)->per_thread_counters );
+
+ util_aligned_free( te_array );
+
+ free( ts );
+
+ lfds700_queue_cleanup( &qs, NULL );
+
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueuer_and_dequeuer( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds700_queue_dequeue( ts->qs, &qe, &ps );
+ te = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( te->thread_number >= ts->number_logical_processors )
+ ts->error_flag = RAISED;
+ else
+ {
+ if( te->counter < ts->per_thread_counters[te->thread_number] )
+ ts->error_flag = RAISED;
+
+ if( te->counter >= ts->per_thread_counters[te->thread_number] )
+ ts->per_thread_counters[te->thread_number] = te->counter+1;
+ }
+
+ te->thread_number = ts->thread_number;
+ te->counter = ++ts->counter;
+
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( *qe, te );
+ lfds700_queue_enqueue( ts->qs, qe, &ps );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_of_elements_per_thread;
+
+ struct lfds700_queue_state
+ *qs;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueue_dequeuer_with_free( void *util_thread_starter_thread_state );
+static void queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_enqueuing_and_dequeuing_with_free( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ number_of_elements_per_thread;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_queue_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_state
+ *tts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : the M&Q queue supports free()ing queue elements after they've been dequeued
+ we need to test this
+ we spawn one thread per logical core
+ there's one master queue which all threads work on
+ we create one freelist per thread
+ and allocate as many queue elements as we can (no payload)
+ - but note each allocate is its own malloc()
+ each freelist receives an equal share (i.e. we get the mallocs out of the way)
+ each thread enqueues as rapidly as possible
+ and dequeues as rapidly as possible
+ (i.e. each thread loops, doing an enqueue and a dequeue)
+ when the dequeue is done, the element is free()ed
+ */
+
+ internal_display_test_name( "Enqueuing and dequeuing with free" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct lfds700_freelist_element) + sizeof(struct lfds700_queue_element) );
+ number_of_elements_per_thread = number_elements / number_logical_processors;
+ qe = util_aligned_malloc( sizeof(struct lfds700_queue_element), (lfds700_pal_uint_t) LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
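+ // note : the initial dummy element is also an individual aligned allocation, so that queue_element_cleanup_callback can free whatever element remains as the dummy at cleanup time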
+ lfds700_queue_init_valid_on_current_logical_core( &qs, qe, &ps, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->qs = &qs;
+ (ts+loop)->number_of_elements_per_thread = number_of_elements_per_thread;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_enqueue_dequeuer_with_free, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = 0;
+ vi.max_elements = 0;
+
+ lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ lfds700_queue_cleanup( &qs, queue_element_cleanup_callback );
+
+ free( ts );
+
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueue_dequeuer_with_free( void *util_thread_starter_thread_state )
+{
+ enum flag
+ finished_flag = LOWERED;
+
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_freelist_element
+ *fe,
+ *fe_array;
+
+ struct lfds700_freelist_state
+ fs;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_freelist_init_valid_on_current_logical_core( &fs, NULL );
+
+ fe_array = util_malloc_wrapper( sizeof(struct lfds700_freelist_element) * ts->number_of_elements_per_thread );
+
+ for( loop = 0 ; loop < ts->number_of_elements_per_thread ; loop++ )
+ {
+ qe = util_aligned_malloc( sizeof(struct lfds700_queue_element), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LFDS700_FREELIST_SET_VALUE_IN_ELEMENT( fe_array[loop], qe );
+ lfds700_freelist_push( &fs, &fe_array[loop], &ps );
+ }
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( finished_flag == LOWERED )
+ {
+ loop = 0;
+ while( loop++ < 1000 and lfds700_freelist_pop(&fs, &fe, &ps) )
+ {
+ qe = LFDS700_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+ lfds700_queue_enqueue( ts->qs, qe, &ps );
+ }
+
+ if( loop < 1000 )
+ finished_flag = RAISED;
+
+ loop = 0;
+ while( loop++ < 1000 and lfds700_queue_dequeue(ts->qs, &qe, &ps) )
+ util_aligned_free( qe );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ lfds700_freelist_cleanup( &fs, NULL );
+
+ free( fe_array );
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag )
+{
+ assert( qs != NULL );
+ assert( qe != NULL );
+ // TRD : dummy_element_flag can be any value in its range
+
+ util_aligned_free( qe );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ struct lfds700_queue_state
+ *qs;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueuer_with_malloc_and_dequeuer_with_free( void *util_thread_starter_thread_state );
+static void queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_enqueuing_with_malloc_and_dequeuing_with_free( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_logical_processors;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_queue_state
+ qs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : qt can be any value in its range
+
+ /* TRD : one thread per logical core
+ each thread loops for ten seconds
+ mallocs and enqueues 1k elements, then dequeues and frees 1k elements
+ */
+
+ internal_display_test_name( "Enqueuing with malloc dequeuing with free (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ qe = util_aligned_malloc( sizeof(struct lfds700_queue_element), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_queue_init_valid_on_current_logical_core( &qs, qe, &ps, NULL );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ (ts+loop)->qs = &qs;
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_enqueuer_with_malloc_and_dequeuer_with_free, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = 0;
+
+ lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ free( ts );
+
+ lfds700_queue_cleanup( &qs, queue_element_cleanup_callback );
+
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_enqueuer_with_malloc_and_dequeuer_with_free( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop,
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ qe = util_aligned_malloc( sizeof(struct lfds700_queue_element), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_queue_enqueue( ts->qs, qe, &ps );
+ }
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ lfds700_queue_dequeue( ts->qs, &qe, &ps );
+ util_aligned_free( qe );
+ }
+
+ if( time_loop++ == REDUCED_TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void queue_element_cleanup_callback( struct lfds700_queue_state *qs, struct lfds700_queue_element *qe, enum lfds700_misc_flag dummy_element_flag )
+{
+ assert( qs != NULL );
+ assert( qe != NULL );
+ // TRD : dummy_element_flag can be any value in its range
+
+ util_aligned_free( qe );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ lfds700_pal_uint_t
+ counter,
+ thread_number;
+
+ struct lfds700_queue_state
+ *qs;
+};
+
+struct test_element
+{
+ struct lfds700_queue_element
+ qe,
+ *qe_use;
+
+ lfds700_pal_uint_t
+ counter,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_rapid_enqueuer_and_dequeuer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_queue_rapid_enqueuing_and_dequeuing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_with_dummy_element,
+ number_elements_without_dummy_element,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct lfds700_queue_state
+ qs;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array,
+ *te;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single queue, capped at 10,000 elements per logical processor
+ we don't want too many elements, so we ensure plenty of element re-use
+ each thread simply loops dequeuing and enqueuing
+ where the user data indicates thread number and an increment counter
+ verification is that the counter increments on a per-thread basis
+ */
+
+ internal_display_test_name( "Rapid enqueuing and dequeuing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element);
+
+ if( number_elements_with_dummy_element > (10000 * number_logical_processors) + 1 )
+ number_elements_with_dummy_element = (10000 * number_logical_processors) + 1;
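+ // note : capping the element count forces each element to be dequeued and re-enqueued many times over the test duration, which is the re-use mentioned above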
+
+ number_elements_without_dummy_element = number_elements_with_dummy_element - 1;
+
+ vi.min_elements = number_elements_without_dummy_element;
+ vi.max_elements = number_elements_without_dummy_element;
+
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_queue_init_valid_on_current_logical_core( &qs, &(te_array+number_elements_without_dummy_element)->qe, &ps, NULL );
+
+ // TRD : we assume the test will iterate at least once (or we'll have a false negative)
+ for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
+ {
+ (te_array+loop)->thread_number = loop;
+ (te_array+loop)->counter = 0;
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, te_array+loop );
+ lfds700_queue_enqueue( &qs, &(te_array+loop)->qe, &ps );
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->qs = &qs;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->counter = 0;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_rapid_enqueuer_and_dequeuer, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lfds700_queue_query( &qs, LFDS700_QUEUE_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ // TRD : now check results
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_queue_dequeue(&qs, &qe, &ps) )
+ {
+ te = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
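+ /* note : elements are re-used throughout the run, so when the queue is drained here
+ each thread's counters resume from wherever that thread had reached;
+ the first counter seen for a thread therefore establishes its baseline
+ */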
+ if( per_thread_counters[te->thread_number] == 0 )
+ per_thread_counters[te->thread_number] = te->counter;
+
+ if( te->counter > per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->counter < per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->counter == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]++;
+ }
+
+ free( per_thread_counters );
+
+ lfds700_queue_cleanup( &qs, NULL );
+
+ util_aligned_free( te_array );
+
+ free( ts );
+
+ internal_display_test_result( 1, "queue", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_rapid_enqueuer_and_dequeuer( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds700_queue_dequeue( ts->qs, &qe, &ps );
+ te = LFDS700_QUEUE_GET_VALUE_FROM_ELEMENT( *qe );
+
+ te->thread_number = ts->thread_number;
+ te->counter = ts->counter++;
+
+ LFDS700_QUEUE_SET_VALUE_IN_ELEMENT( *qe, te );
+ lfds700_queue_enqueue( ts->qs, qe, &ps );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning C4127 (conditional expression is constant)
+
+void test_lfds700_ringbuffer( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ printf( "\n"
+ "Ringbuffer Tests\n"
+ "================\n" );
+
+ test_lfds700_ringbuffer_reading( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_ringbuffer_reading_and_writing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_ringbuffer_writing( list_of_logical_processors, memory_in_megabytes );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ lfds700_pal_uint_t
+ read_count;
+
+ struct lfds700_ringbuffer_state
+ *rs;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_reader( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_ringbuffer_reading( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs[2] = { LFDS700_MISC_VALIDITY_VALID, LFDS700_MISC_VALIDITY_VALID };
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_with_dummy_element,
+ number_elements_without_dummy_element,
+ number_logical_processors,
+ total_read = 0;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_ringbuffer_element
+ *re_array;
+
+ struct lfds700_ringbuffer_state
+ rs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single ringbuffer
+ with as many elements as fit in the given amount of memory
+ we populate the ringbuffer, where the
+ user data is an incrementing counter
+
+ we create one thread per CPU
+ where each thread busy-works,
+ reading until the ringbuffer is empty
+
+ each thread keeps track of the number of reads it manages
+ and that each user data it reads is greater than the
+ previous user data that was read
+ */
+
+ internal_display_test_name( "Reading" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct lfds700_ringbuffer_element);
+ number_elements_without_dummy_element = number_elements_with_dummy_element - 1;
+
+ vi.min_elements = 0;
+ vi.max_elements = number_elements_without_dummy_element;
+
+ re_array = util_aligned_malloc( sizeof(struct lfds700_ringbuffer_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements_with_dummy_element, &ps, NULL );
+
+ // TRD : init the ringbuffer contents for the test
+ for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
+ lfds700_ringbuffer_write( &rs, NULL, (void *) (size_t) loop, NULL, NULL, NULL, &ps );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->rs = &rs;
+ (ts+loop)->read_count = 0;
+ (ts+loop)->error_flag = LOWERED;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
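+ // TRD : issue a store barrier and force the stores out, so the init work above is visible to the threads we are about to start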
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_reader, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
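+ // TRD : the ringbuffer is built from a queue and a freelist, so validation returns two results, one for each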
+ lfds700_ringbuffer_query( &rs, LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (ts+loop)->error_flag == RAISED )
+ dvs[0] = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : check the total number of reads across all threads equals the number of elements
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ total_read += (ts+loop)->read_count;
+
+ if( total_read < number_elements_without_dummy_element )
+ dvs[0] = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( total_read > number_elements_without_dummy_element )
+ dvs[0] = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ free( ts );
+
+ lfds700_ringbuffer_cleanup( &rs, NULL );
+
+ util_aligned_free( re_array );
+
+ internal_display_test_result( 2, "queue", dvs[0], "freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_reader( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ *prev_value,
+ *value;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
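+ // TRD : take one element before the test proper begins, so prev_value has a baseline for the ordering check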
+ lfds700_ringbuffer_read( ts->rs, NULL, (void **) &prev_value, &ps );
+ ts->read_count++;
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( lfds700_ringbuffer_read(ts->rs, NULL, (void **) &value, &ps) )
+ {
+ if( value <= prev_value )
+ ts->error_flag = RAISED;
+
+ prev_value = value;
+
+ ts->read_count++;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ enum flag
+ error_flag;
+
+ lfds700_pal_uint_t
+ counter,
+ number_logical_processors,
+ *per_thread_counters,
+ thread_number;
+
+ struct lfds700_ringbuffer_state
+ *rs;
+};
+
+struct test_element
+{
+ lfds700_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_reader_writer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_ringbuffer_reading_and_writing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs[2] = { LFDS700_MISC_VALIDITY_VALID, LFDS700_MISC_VALIDITY_VALID };
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_with_dummy_element,
+ number_elements_without_dummy_element,
+ number_logical_processors,
+ subloop;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_ringbuffer_element
+ *re_array;
+
+ struct lfds700_ringbuffer_state
+ rs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single ringbuffer
+ with as many elements as fit in the given amount of memory
+ the ringbuffer is fully populated before the threads start
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ and then immediately reading
+ for ten seconds
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ while a thread runs, it keeps track of the
+ counters for the other threads and throws an error
+ if it sees the number stay the same or decrease
+ */
+
+ internal_display_test_name( "Reading and writing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) + sizeof(struct lfds700_ringbuffer_element) );
+ number_elements_without_dummy_element = number_elements_with_dummy_element - 1;
+
+ vi.min_elements = 0;
+ vi.max_elements = number_elements_without_dummy_element;
+
+ re_array = util_aligned_malloc( sizeof(struct lfds700_ringbuffer_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements_with_dummy_element, &ps, NULL );
+
+ te_array = util_malloc_wrapper( sizeof(struct test_element) * number_elements_without_dummy_element );
+
+ // TRD : populate the ringbuffer
+ for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
+ {
+ te_array[loop].thread_number = 0;
+ te_array[loop].datum = (lfds700_pal_uint_t) -1 ;
+ lfds700_ringbuffer_write( &rs, NULL, &te_array[loop], NULL, NULL, NULL, &ps );
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->rs = &rs;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->counter = 0;
+ (ts+loop)->number_logical_processors = number_logical_processors;
+ (ts+loop)->error_flag = LOWERED;
+ (ts+loop)->per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *((ts+loop)->per_thread_counters+subloop) = 0;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_reader_writer, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lfds700_ringbuffer_query( &rs, LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (ts+loop)->error_flag == RAISED )
+ dvs[0] = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ free( (ts+loop)->per_thread_counters );
+
+ free( ts );
+
+ lfds700_ringbuffer_cleanup( &rs, NULL );
+
+ util_aligned_free( re_array );
+
+ free( te_array );
+
+ internal_display_test_result( 2, "queue", dvs[0], "freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_reader_writer( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds700_ringbuffer_read( ts->rs, NULL, (void **) &te, &ps );
+
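+ // TRD : for any given writing thread, the counters we see must never repeat or decrease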
+ if( te->thread_number >= ts->number_logical_processors )
+ ts->error_flag = RAISED;
+ else
+ {
+ if( te->datum < ts->per_thread_counters[te->thread_number] )
+ ts->error_flag = RAISED;
+
+ if( te->datum >= ts->per_thread_counters[te->thread_number] )
+ ts->per_thread_counters[te->thread_number] = te->datum+1;
+ }
+
+ te->thread_number = ts->thread_number;
+ te->datum = ts->counter++;
+
+ lfds700_ringbuffer_write( ts->rs, NULL, te, NULL, NULL, NULL, &ps );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ lfds700_pal_uint_t
+ thread_number,
+ datum;
+};
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ thread_number,
+ write_count;
+
+ struct test_element
+ te;
+
+ struct lfds700_ringbuffer_state
+ *rs;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_writer( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_ringbuffer_writing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs[2] = { LFDS700_MISC_VALIDITY_VALID, LFDS700_MISC_VALIDITY_VALID };
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements_with_dummy_element,
+ number_elements_without_dummy_element,
+ number_logical_processors,
+ *per_thread_counters;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_ringbuffer_element
+ *re_array;
+
+ struct lfds700_ringbuffer_state
+ rs;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te,
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a single ringbuffer
+ with n elements
+ we create n test elements
+ which are thread_number/counter pairs
+ init them to safe values
+ and fully populate the ringbuffer
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ for ten seconds; each thread has one extra element
+ which it uses for the first write and after that
+ it uses the element it picks up from overwriting
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ after the threads are complete, we validate by
+ checking the user data counters increment on a per thread
+ basis
+ */
+
+ internal_display_test_name( "Writing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements_with_dummy_element = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) + sizeof(struct lfds700_ringbuffer_element) );
+ number_elements_without_dummy_element = number_elements_with_dummy_element - 1;
+
+ vi.min_elements = number_elements_without_dummy_element;
+ vi.max_elements = number_elements_without_dummy_element;
+
+ re_array = util_aligned_malloc( sizeof(struct lfds700_ringbuffer_element) * number_elements_with_dummy_element, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds700_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements_with_dummy_element, &ps, NULL );
+
+ te_array = util_malloc_wrapper( sizeof(struct test_element) * number_elements_without_dummy_element );
+
+ // TRD : init the test elements and write them into the ringbuffer
+ for( loop = 0 ; loop < number_elements_without_dummy_element ; loop++ )
+ {
+ te_array[loop].thread_number = 0;
+ te_array[loop].datum = 0;
+ lfds700_ringbuffer_write( &rs, NULL, &te_array[loop], NULL, NULL, NULL, &ps );
+ }
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->rs = &rs;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->write_count = 0;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_simple_writer, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ // TRD : now check results
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ lfds700_ringbuffer_query( &rs, LFDS700_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ while( dvs[0] == LFDS700_MISC_VALIDITY_VALID and dvs[1] == LFDS700_MISC_VALIDITY_VALID and lfds700_ringbuffer_read(&rs, NULL, (void **) &te, &ps) )
+ {
+ if( te->thread_number >= number_logical_processors )
+ {
+ dvs[0] = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
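+ // TRD : the first element we see for a given thread sets that thread's baseline counter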
+ if( per_thread_counters[te->thread_number] == 0 )
+ per_thread_counters[te->thread_number] = te->datum;
+
+ if( te->datum < per_thread_counters[te->thread_number] )
+ dvs[0] = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->datum >= per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number] = te->datum+1;
+ }
+
+ free( per_thread_counters );
+
+ lfds700_ringbuffer_cleanup( &rs, NULL );
+
+ free( ts );
+
+ util_aligned_free( re_array );
+
+ free( te_array );
+
+ internal_display_test_result( 2, "queue", dvs[0], "freelist", dvs[1] );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_simple_writer( void *util_thread_starter_thread_state )
+{
+ enum lfds700_misc_flag
+ overwrite_occurred_flag;
+
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ ts->te.thread_number = 0;
+ ts->te.datum = 0;
+
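+ // TRD : the first write uses this thread's own element; each write returns the overwritten element in te, which is re-used for the next write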
+ lfds700_ringbuffer_write( ts->rs, NULL, &ts->te, &overwrite_occurred_flag, NULL, (void **) &te, &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ te->thread_number = ts->thread_number;
+ te->datum = ts->write_count++;
+
+ lfds700_ringbuffer_write( ts->rs, NULL, te, &overwrite_occurred_flag, NULL, (void **) &te, &ps );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_stack( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ if( LFDS700_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ printf( "\n"
+ "Stack Tests\n"
+ "===========\n" );
+
+ test_lfds700_stack_alignment();
+ test_lfds700_stack_popping( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_stack_pushing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_stack_popping_and_pushing( list_of_logical_processors, memory_in_megabytes );
+ test_lfds700_stack_rapid_popping_and_pushing( list_of_logical_processors );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void test_lfds700_stack_alignment()
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ internal_display_test_name( "Alignment" );
+
+
+
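+ // TRD : members operated on atomically must be aligned to LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES; we check their offsets within the stack state here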
+ // TRD : struct lfds700_stack_state
+ if( offsetof(struct lfds700_stack_state,top) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( offsetof(struct lfds700_stack_state,user_state) % LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "stack", dvs );
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ struct lfds700_stack_state
+ *ss;
+};
+
+struct test_element
+{
+ struct lfds700_stack_element
+ se;
+
+ enum flag
+ popped_flag;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_stack_popping( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_stack_state
+ ss;
+
+ struct lfds700_misc_validation_info
+ vi = { 0, 0 };
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create a stack
+
+ we then populate the stack with as many elements as fit in the given amount of memory
+ each void pointer of data points to the containing test element
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ upon popping, a flag is set in the containing test element
+
+ the threads run till the source stack is empty
+
+ we then check the popped flags; all should be raised
+
+ then tidy up
+
+ no CAS+GC code, as we only pop
+ */
+
+ internal_display_test_name( "Popping" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / sizeof(struct test_element) ;
+
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ (te_array+loop)->popped_flag = LOWERED;
+ LFDS700_STACK_SET_VALUE_IN_ELEMENT( (te_array+loop)->se, te_array+loop );
+ lfds700_stack_push( &ss, &(te_array+loop)->se, &ps );
+ }
+
+ ts = util_aligned_malloc( sizeof(struct test_state) * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ (ts+loop)->ss = &ss;
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_popping, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ lfds700_stack_query( &ss, LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, (void *) &dvs );
+
+ // TRD : now we check each element has popped_flag set to RAISED
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( (te_array+loop)->popped_flag == LOWERED )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds700_stack_cleanup( &ss, NULL );
+ util_aligned_free( te_array );
+ util_aligned_free( ts );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "stack", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping( void *util_thread_starter_thread_state )
+{
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_stack_element
+ *se;
+
+ struct test_element
+ *te;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ while( lfds700_stack_pop(ts->ss, &se, &ps) )
+ {
+ te = LFDS700_STACK_GET_VALUE_FROM_ELEMENT( *se );
+ te->popped_flag = RAISED;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_element;
+
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements;
+
+ struct lfds700_stack_state
+ *ss,
+ ss_thread_local;
+
+ struct test_element
+ *ss_thread_local_te_array;
+};
+
+struct test_element
+{
+ struct lfds700_stack_element
+ se,
+ thread_local_se;
+
+ lfds700_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *util_thread_starter_thread_state );
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_stack_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ subloop;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_stack_state
+ ss;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pops a batch of elements from the main stack into a thread-local stack, then pushes them back
+ the second thread pushes its thread-local elements onto the main stack, then pops a batch back
+ so elements continually move between the single main stack and the per-thread local stacks
+
+ after time is up, all threads push what they have remaining onto
+ the main stack
+
+ we then validate the main stack
+ */
+
+ internal_display_test_name( "Popping and pushing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors * 2 );
+
+ lfds700_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ // TRD : some initial elements so the pushing threads can start immediately
+ for( loop = 0 ; loop < number_elements * number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->datum = loop;
+ LFDS700_STACK_SET_VALUE_IN_ELEMENT( (te_array+loop)->se, te_array+loop );
+ lfds700_stack_push( &ss, &(te_array+loop)->se, &ps );
+ }
+
+ ts = util_aligned_malloc( sizeof(struct test_state) * number_logical_processors * 2, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ // TRD : first set of threads
+ (ts+loop)->ss = &ss;
+ (ts+loop)->number_elements = number_elements;
+ lfds700_stack_init_valid_on_current_logical_core( &(ts+loop)->ss_thread_local, NULL );
+
+ // TRD : second set of threads
+ (ts+loop+number_logical_processors)->ss = &ss;
+ (ts+loop+number_logical_processors)->number_elements = number_elements;
+ lfds700_stack_init_valid_on_current_logical_core( &(ts+loop+number_logical_processors)->ss_thread_local, NULL );
+
+ // TRD : fill the pushing thread stacks
+ (ts+loop+number_logical_processors)->ss_thread_local_te_array = util_aligned_malloc( sizeof(struct test_element) * number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( subloop = 0 ; subloop < number_elements ; subloop++ )
+ {
+ ((ts+loop+number_logical_processors)->ss_thread_local_te_array+subloop)->datum = loop;
+ LFDS700_STACK_SET_VALUE_IN_ELEMENT( ((ts+loop+number_logical_processors)->ss_thread_local_te_array+subloop)->thread_local_se, (ts+loop+number_logical_processors)->ss_thread_local_te_array+subloop );
+ lfds700_stack_push( &(ts+loop+number_logical_processors)->ss_thread_local, &((ts+loop+number_logical_processors)->ss_thread_local_te_array+subloop)->thread_local_se, &ps );
+ }
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors * 2 );
+
+ util_thread_starter_new( &tts, number_logical_processors * 2 );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_popping_and_pushing_start_popping, ts+loop );
+ util_thread_starter_start( tts, &thread_handles[loop+number_logical_processors], loop+number_logical_processors, lp, thread_popping_and_pushing_start_pushing, ts+loop+number_logical_processors );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors * 2 ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors * 2;
+
+ lfds700_stack_query( &ss, LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ lfds700_stack_cleanup( &ss, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ lfds700_stack_cleanup( &(ts+loop)->ss_thread_local, NULL );
+ lfds700_stack_cleanup( &(ts+loop+number_logical_processors)->ss_thread_local, NULL );
+ util_aligned_free( (ts+loop+number_logical_processors)->ss_thread_local_te_array );
+ }
+
+ util_aligned_free( ts );
+
+ util_aligned_free( te_array );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "stack", dvs );
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ count;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_stack_element
+ *se;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ count = 0;
+
+ while( count < ts->number_elements )
+ if( lfds700_stack_pop(ts->ss, &se, &ps) )
+ {
+ lfds700_stack_push( &ts->ss_thread_local, se, &ps );
+ count++;
+ }
+
+ // TRD : return our local stack to the main stack
+ while( lfds700_stack_pop(&ts->ss_thread_local, &se, &ps) )
+ lfds700_stack_push( ts->ss, se, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ count;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_stack_element
+ *se;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ // TRD : return our local stack to the main stack
+ while( lfds700_stack_pop(&ts->ss_thread_local, &se, &ps) )
+ lfds700_stack_push( ts->ss, se, &ps );
+
+ count = 0;
+
+ while( count < ts->number_elements )
+ if( lfds700_stack_pop(ts->ss, &se, &ps) )
+ {
+ lfds700_stack_push( &ts->ss_thread_local, se, &ps );
+ count++;
+ }
+ }
+
+ // TRD : now push whatever we have in our local stack
+ while( lfds700_stack_pop(&ts->ss_thread_local, &se, &ps) )
+ lfds700_stack_push( ts->ss, se, &ps );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ lfds700_pal_uint_t
+ number_elements,
+ thread_number;
+
+ struct lfds700_stack_state
+ *ss;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds700_stack_element
+ se;
+
+ lfds700_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_pushing( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_stack_pushing( struct lfds700_list_asu_state *list_of_logical_processors, lfds700_pal_uint_t memory_in_megabytes )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_stack_element
+ *se;
+
+ struct lfds700_stack_state
+ ss;
+
+ struct lfds700_misc_validation_info
+ vi;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te,
+ *first_te = NULL;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : memory_in_megabytes can be any value in its range
+
+ /* TRD : we create an empty stack
+
+ we then create one thread per CPU, where each thread
+ pushes its full share of elements as quickly as possible to the stack
+ (the threads themselves alloc these elements, to obtain NUMA closeness)
+
+ the data pushed is a counter and a thread ID
+
+ the threads exit when they have pushed all of their elements
+
+ we then validate the stack;
+
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals the full count per thread
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+
+ there's no CAS+GC code, as we only push
+ */
+
+ internal_display_test_name( "Pushing" );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ number_elements = ( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES ) / ( sizeof(struct test_element) * number_logical_processors );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ // TRD : the main stack
+ lfds700_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (ts+loop)->ss = &ss;
+ (ts+loop)->thread_number = loop;
+ (ts+loop)->number_elements = number_elements;
+ }
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_pushing, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ // TRD : the stack is now fully pushed; time to verify
+ per_thread_counters = util_malloc_wrapper( sizeof(lfds700_pal_uint_t) * number_logical_processors );
+
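+ // TRD : the stack is LIFO, so for each thread the highest counter is popped first; we expect each thread's counters to count down from number_elements-1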
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements - 1;
+
+ vi.min_elements = vi.max_elements = number_elements * number_logical_processors;
+
+ lfds700_stack_query( &ss, LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ while( dvs == LFDS700_MISC_VALIDITY_VALID and lfds700_stack_pop(&ss, &se, &ps) )
+ {
+ te = LFDS700_STACK_GET_VALUE_FROM_ELEMENT( *se );
+
+ if( first_te == NULL )
+ first_te = te;
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ dvs = LFDS700_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( te->datum > per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->datum < per_thread_counters[te->thread_number] )
+ dvs = LFDS700_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->datum == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]--;
+ }
+
+ // TRD : clean up
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ util_aligned_free( (ts+loop)->te_array );
+
+ free( per_thread_counters );
+
+ free( ts );
+
+ lfds700_stack_cleanup( &ss, NULL );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "stack", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_pushing( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ // TRD : allocate this thread's elements locally, to obtain NUMA closeness
+ ts->te_array = util_aligned_malloc( sizeof(struct test_element) * ts->number_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ (ts->te_array+loop)->thread_number = ts->thread_number;
+ (ts->te_array+loop)->datum = loop;
+ }
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ for( loop = 0 ; loop < ts->number_elements ; loop++ )
+ {
+ LFDS700_STACK_SET_VALUE_IN_ELEMENT( (ts->te_array+loop)->se, ts->te_array+loop );
+ lfds700_stack_push( ts->ss, &(ts->te_array+loop)->se, &ps );
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct test_state
+{
+ struct lfds700_stack_state
+ *ss;
+};
+
+struct test_element
+{
+ struct lfds700_stack_element
+ se;
+
+ lfds700_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *util_thread_starter_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void test_lfds700_stack_rapid_popping_and_pushing( struct lfds700_list_asu_state *list_of_logical_processors )
+{
+ enum lfds700_misc_validity
+ dvs = LFDS700_MISC_VALIDITY_VALID;
+
+ lfds700_pal_uint_t
+ loop,
+ number_logical_processors;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_list_asu_element
+ *lasue;
+
+ struct lfds700_stack_state
+ ss;
+
+ struct lfds700_misc_validation_info
+ vi = { 0, 0 };
+
+ struct test_pal_logical_processor
+ *lp;
+
+ struct util_thread_starter_state
+ *tts;
+
+ struct test_element
+ *te_array;
+
+ struct test_state
+ *ts;
+
+ test_pal_thread_state_t
+ *thread_handles;
+
+ assert( list_of_logical_processors != NULL );
+ // TRD : this test takes no memory limit; it uses only one stack element per logical processor
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory clean up that we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/clean up that
+ they can tell the stack is valid and don't leak memory
+ and here, this test now is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but goes very quickly
+
+ what we do is create a small stack and then run one thread
+ per CPU, where each thread simply pushes and then immediately
+ pops
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the stack, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ internal_display_test_name( "Rapid popping and pushing (%d seconds)", TEST_DURATION_IN_SECONDS );
+
+ lfds700_list_asu_query( list_of_logical_processors, LFDS700_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ lfds700_misc_prng_init( &ps );
+
+ ts = util_malloc_wrapper( sizeof(struct test_state) * number_logical_processors );
+
+ lfds700_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ (ts+loop)->ss = &ss;
+
+ thread_handles = util_malloc_wrapper( sizeof(test_pal_thread_state_t) * number_logical_processors );
+
+ // TRD : we need one element per thread
+ te_array = util_aligned_malloc( sizeof(struct test_element) * number_logical_processors, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ LFDS700_STACK_SET_VALUE_IN_ELEMENT( (te_array+loop)->se, te_array+loop );
+ lfds700_stack_push( &ss, &(te_array+loop)->se, &ps );
+ }
+
+ util_thread_starter_new( &tts, number_logical_processors );
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS700_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS700_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ util_thread_starter_start( tts, &thread_handles[loop], loop, lp, thread_rapid_popping_and_pushing, ts+loop );
+ loop++;
+ }
+
+ util_thread_starter_run( tts );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ test_pal_thread_wait( thread_handles[loop] );
+
+ util_thread_starter_delete( tts );
+
+ free( thread_handles );
+
+ LFDS700_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_logical_processors;
+
+ lfds700_stack_query( &ss, LFDS700_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, &dvs );
+
+ lfds700_stack_cleanup( &ss, NULL );
+
+ util_aligned_free( te_array );
+
+ free( ts );
+
+ // TRD : print the test result
+ internal_display_test_result( 1, "stack", dvs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *util_thread_starter_thread_state )
+{
+ lfds700_pal_uint_t
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_stack_element
+ *se;
+
+ struct test_state
+ *ts;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS700_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ assert( util_thread_starter_thread_state != NULL );
+
+ tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+ ts = (struct test_state *) tsts->thread_user_state;
+
+ lfds700_misc_prng_init( &ps );
+
+ util_thread_starter_ready_and_wait( tsts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds700_stack_pop( ts->ss, &se, &ps );
+ lfds700_stack_push( ts->ss, se, &ps );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return( (test_pal_thread_return_t) EXIT_SUCCESS );
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined _KERNEL_MODE && NTDDI_VERSION >= NTDDI_WIN7 )
+
+ /* TRD : _WIN32 indicates 64-bit or 32-bit Windows
+ !_KERNEL_MODE indicates Windows user-mode
+ NTDDI_VERSION indicates Windows version
+ - GetLogicalProcessorInformationEx requires Windows 7
+ */
+
+ #ifdef TEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in test_porting_abstraction_layer_get_logical_core_ids.c
+ #endif
+
+ #define TEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void test_pal_get_logical_core_ids( struct lfds700_list_asu_state *lasus )
+ {
+ BOOL
+ rv;
+
+ DWORD
+ loop,
+ number_slpie,
+ slpie_length = 0;
+
+ lfds700_pal_uint_t
+ bitmask,
+ logical_processor_number,
+ windows_logical_processor_group_number;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
+ *slpie = NULL;
+
+ assert( lasus != NULL );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_init_valid_on_current_logical_core( lasus, NULL, NULL );
+
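+ // TRD : the first call fails by design and returns the required buffer length in slpie_length; we then allocate and call again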
+ rv = GetLogicalProcessorInformationEx( RelationGroup, slpie, &slpie_length );
+ slpie = malloc( slpie_length );
+ rv = GetLogicalProcessorInformationEx( RelationGroup, slpie, &slpie_length );
+ number_slpie = slpie_length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX);
+
+ for( loop = 0 ; loop < number_slpie ; loop++ )
+ if( (slpie+loop)->Relationship == RelationGroup )
+ for( windows_logical_processor_group_number = 0 ; windows_logical_processor_group_number < (slpie+loop)->Group.ActiveGroupCount ; windows_logical_processor_group_number++ )
+ for( logical_processor_number = 0 ; logical_processor_number < sizeof(KAFFINITY) * BITS_PER_BYTE ; logical_processor_number++ )
+ {
+ bitmask = (lfds700_pal_uint_t) 1 << logical_processor_number;
+
+ // TRD : if we've found a processor for this group, add it to the list
+ if( (slpie+loop)->Group.GroupInfo[windows_logical_processor_group_number].ActiveProcessorMask & bitmask )
+ {
+ lp = util_aligned_malloc( sizeof(struct test_pal_logical_processor), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lp->logical_processor_number = logical_processor_number;
+ lp->windows_logical_processor_group_number = windows_logical_processor_group_number;
+
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( lp->lasue, lp );
+ lfds700_list_asu_insert_at_start( lasus, &lp->lasue, &ps );
+ }
+ }
+
+ free( slpie );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined _KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 )
+
+ /* TRD : _WIN32 indicates 64-bit or 32-bit Windows
+ !_KERNEL_MODE indicates Windows user-mode
+ NTDDI_VERSION indicates Windows version
+ - GetLogicalProcessorInformation requires XP SP3
+ */
+
+ #ifdef TEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in test_porting_abstraction_layer_get_logical_core_ids.c
+ #endif
+
+ #define TEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void test_pal_get_logical_core_ids( struct lfds700_list_asu_state *lasus )
+ {
+ DWORD
+ slpi_length = 0;
+
+ lfds700_pal_uint_t
+ bitmask,
+ logical_processor_number,
+ number_slpi,
+ loop;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION
+ *slpi = NULL;
+
+ assert( lasus != NULL );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_init_valid_on_current_logical_core( lasus, NULL, NULL );
+
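+ // TRD : the first call fails by design and returns the required buffer length in slpi_length; we then allocate and call again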
+ GetLogicalProcessorInformation( slpi, &slpi_length );
+ slpi = malloc( slpi_length );
+ GetLogicalProcessorInformation( slpi, &slpi_length );
+ number_slpi = slpi_length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+
+ for( loop = 0 ; loop < number_slpi ; loop++ )
+ if( (slpi+loop)->Relationship == RelationProcessorCore )
+ for( logical_processor_number = 0 ; logical_processor_number < sizeof(ULONG_PTR) * BITS_PER_BYTE ; logical_processor_number++ )
+ {
+ bitmask = (lfds700_pal_uint_t) 1 << logical_processor_number;
+
+ // TRD : if this logical processor is active, add it to the list
+ if( (slpi+loop)->ProcessorMask & bitmask )
+ {
+ lp = util_aligned_malloc( sizeof(struct test_pal_logical_processor), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lp->logical_processor_number = logical_processor_number;
+ lp->windows_logical_processor_group_number = 0;
+
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( lp->lasue, lp );
+ lfds700_list_asu_insert_at_start( lasus, &lp->lasue, &ps );
+ }
+ }
+
+ free( slpi );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ )
+
+ /* TRD : __linux__ indicates Linux
+ __STDC__ indicates Standard Library
+ __STDC_HOSTED__ indicates Standard Library hosted implementation
+ - fopen requires a Standard Library hosted environment
+ - setbuf requires a Standard Library hosted environment
+ - fgets requires a Standard Library hosted environment
+ - sscanf requires a Standard Library hosted environment
+ - fclose requires a Standard Library hosted environment
+ */
+
+ #ifdef TEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in test_porting_abstraction_layer_get_logical_core_ids.c
+ #endif
+
+ #define TEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void test_pal_get_logical_core_ids( struct lfds700_list_asu_state *lasus )
+ {
+ char
+ diskbuffer[BUFSIZ],
+ string[1024];
+
+ FILE
+ *diskfile;
+
+ int long long unsigned
+ logical_processor_number;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct test_pal_logical_processor
+ *lp;
+
+ assert( lasus != NULL );
+
+ lfds700_misc_prng_init( &ps );
+
+ lfds700_list_asu_init_valid_on_current_logical_core( lasus, NULL, NULL );
+
+ diskfile = fopen( "/proc/cpuinfo", "r" );
+
+ if( diskfile != NULL )
+ {
+ setbuf( diskfile, diskbuffer );
+
+ while( NULL != fgets(string, 1024, diskfile) )
+ if( 1 == sscanf(string, "processor : %llu", &logical_processor_number) )
+ {
+ lp = util_aligned_malloc( sizeof(struct test_pal_logical_processor), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lp->logical_processor_number = (lfds700_pal_uint_t) logical_processor_number;
+ lp->windows_logical_processor_group_number = 0;
+
+ LFDS700_LIST_ASU_SET_VALUE_IN_ELEMENT( lp->lasue, lp );
+ lfds700_list_asu_insert_at_start( lasus, &lp->lasue, &ps );
+ }
+
+ fclose( diskfile );
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined TEST_PAL_GET_LOGICAL_CORE_IDS )
+
+ #error test_pal_get_logical_core_ids() not implemented for this platform.
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _MSC_VER )
+ /* TRD : MSVC compiler
+
+ an unfortunately necessary hack for MSVC
+ MSVC only defines __STDC__ if /Za is given, where /Za turns off MSVC C extensions -
+ which prevents Windows header files from compiling.
+ */
+
+ #define __STDC__ 1
+ #define __STDC_HOSTED__ 1
+#endif
+
+#if( defined __linux__ )
+ #define _GNU_SOURCE
+ #include <unistd.h>
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1310 && NTDDI_VERSION >= NTDDI_WINXP && defined _WIN32 )
+
+ #ifdef TEST_PAL_PORTING_ABSTRACTION_LAYER
+ #error More than one porting abstraction layer matches current platform.
+ #endif
+
+ #define TEST_PAL_PORTING_ABSTRACTION_LAYER
+
+ #define TEST_PAL_OS_STRING "Windows"
+
+ #include <windows.h>
+
+ typedef HANDLE test_pal_thread_state_t;
+ typedef DWORD test_pal_thread_return_t;
+
+ #define TEST_PAL_CALLING_CONVENTION WINAPI
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __linux__ && _POSIX_THREADS > 0 )
+
+ #ifdef TEST_PAL_PORTING_ABSTRACTION_LAYER
+ #error More than one porting abstraction layer matches current platform.
+ #endif
+
+ #define TEST_PAL_PORTING_ABSTRACTION_LAYER
+
+ #define TEST_PAL_OS_STRING "Linux"
+
+ #define _GNU_SOURCE
+
+ #include <pthread.h>
+ #include <sched.h>
+ #include <sys/syscall.h>
+ #include <sys/types.h>
+
+ typedef pthread_t test_pal_thread_state_t;
+ typedef void * test_pal_thread_return_t;
+
+ #define TEST_PAL_CALLING_CONVENTION
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined TEST_PAL_PORTING_ABSTRACTION_LAYER )
+
+ #error No matching porting abstraction layer in test_porting_abstraction_layer_operating_system.h
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined _KERNEL_MODE && NTDDI_VERSION >= NTDDI_WIN7 )
+
+ /* TRD : _WIN32 indicates 32-bit or 64-bit Windows
+ !_KERNEL_MODE indicates Windows user-mode
+ NTDDI_VERSION indicates Windows version
+ - GetCurrentProcess requires XP
+ - InitializeProcThreadAttributeList requires Windows 7
+ - CreateRemoteThreadEx requires Windows 7
+ */
+
+ #ifdef TEST_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in test_porting_abstraction_layer_thread_start.c
+ #endif
+
+ #define TEST_PAL_THREAD_START
+
+ int test_pal_thread_start( test_pal_thread_state_t *thread_state,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state),
+ void *thread_user_state )
+ {
+ BOOL
+ brv;
+
+ DWORD
+ thread_id;
+
+ GROUP_AFFINITY
+ ga;
+
+ int
+ rv = 0;
+
+ LPPROC_THREAD_ATTRIBUTE_LIST
+ attribute_list;
+
+ SIZE_T
+ attribute_list_length;
+
+ assert( thread_state != NULL );
+ assert( lp != NULL );
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ /* TRD : here we're using CreateRemoteThreadEx() to start a thread in our own process
+ we do this because as a function, it allows us to specify processor and processor group affinity in the create call
+ */
+
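+    // TRD : the first call, with a NULL attribute list, obtains the required buffer length; the second call then initializes the list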
+ brv = InitializeProcThreadAttributeList( NULL, 1, 0, &attribute_list_length );
+ attribute_list = malloc( attribute_list_length );
+ brv = InitializeProcThreadAttributeList( attribute_list, 1, 0, &attribute_list_length );
+
+ ga.Mask = ( (KAFFINITY) 1 << lp->logical_processor_number );
+ ga.Group = (WORD) lp->windows_logical_processor_group_number;
+ memset( ga.Reserved, 0, sizeof(WORD) * 3 );
+
+ brv = UpdateProcThreadAttribute( attribute_list, 0, PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY, &ga, sizeof(GROUP_AFFINITY), NULL, NULL );
+ *thread_state = CreateRemoteThreadEx( GetCurrentProcess(), NULL, 0, thread_function, thread_user_state, NO_FLAGS, attribute_list, &thread_id );
+
+ DeleteProcThreadAttributeList( attribute_list );
+ free( attribute_list );
+
+ if( *thread_state != NULL )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined _KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 )
+
+ /* TRD : _WIN32 indicates 64-bit or 32-bit Windows
+ NTDDI_VERSION indicates Windows version
+ - CreateThread requires XP
+ - SetThreadAffinityMask requires XP
+ - ResumeThread requires XP
+ */
+
+ #ifdef TEST_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in test_porting_abstraction_layer_thread_start.c
+ #endif
+
+ #define TEST_PAL_THREAD_START
+
+ int test_pal_thread_start( test_pal_thread_state_t *thread_state,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state),
+ void *thread_user_state )
+ {
+ int
+ rv = 0;
+
+ DWORD
+ thread_id;
+
+ DWORD_PTR
+ affinity_mask,
+ result;
+
+ assert( thread_state != NULL );
+ assert( lp != NULL );
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ /* TRD : Vista and earlier do not support processor groups
+ as such, there is a single implicit processor group
+ also, there's no support for actually starting a thread in its correct NUMA node / logical processor
+ so we make the best of it; we start suspended, set the affinity, and then resume
+ the thread itself internally is expected to be making allocs from the correct NUMA node
+ */
+
+ *thread_state = CreateThread( NULL, 0, thread_function, thread_user_state, CREATE_SUSPENDED, &thread_id );
+
+    affinity_mask = (DWORD_PTR) 1 << lp->logical_processor_number;
+
+ SetThreadAffinityMask( *thread_state, affinity_mask );
+
+ ResumeThread( *thread_state );
+
+ if( *thread_state != NULL )
+ rv = 1;
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && _POSIX_THREADS > 0 )
+
+ /* TRD : __linux__ indicates Linux
+ - gettid requires Linux
+ - sched_setaffinity requires Linux
+ _POSIX_THREADS indicates POSIX threads
+ - pthread_create requires POSIX
+ */
+
+ #ifdef TEST_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in test_porting_abstraction_layer_thread_start.c
+ #endif
+
+ #define TEST_PAL_THREAD_START
+
+ /***** structs *****/
+ struct test_pal_internal_thread_state
+ {
+ struct test_pal_logical_processor
+ lp;
+
+ test_pal_thread_return_t
+ (TEST_PAL_CALLING_CONVENTION *thread_function)( void *thread_user_state );
+
+ void
+ *thread_user_state;
+ };
+
+ /***** prototypes *****/
+ test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state );
+
+ /****************************************************************************/
+ int test_pal_thread_start( test_pal_thread_state_t *thread_state,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state),
+ void *thread_user_state )
+ {
+ int
+ rv;
+
+ struct test_pal_internal_thread_state
+ *its;
+
+ /* TRD : this implementation works on Linux only as it uses sched_setaffinity(), which is Linux specific
+ although I cannot currently test, I believe this function also works on Android
+
+ this implementation exists because the pthreads function for setting thread affinity,
+ pthread_attr_setaffinity_np(), works on Linux, but not Android
+ */
+
+ assert( thread_state != NULL );
+ assert( lp != NULL );
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ its = malloc( sizeof(struct test_pal_internal_thread_state) );
+
+ its->lp = *lp;
+ its->thread_function = thread_function;
+ its->thread_user_state = thread_user_state;
+
+    rv = pthread_create( thread_state, NULL, test_pal_internal_thread_function, its );
+
+    // TRD : pthread_create() returns 0 on success; convert to this function's 1 (success) / 0 (failure)
+    rv = (rv == 0);
+
+ return( rv );
+ }
+
+ /****************************************************************************/
+ test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state )
+ {
+ cpu_set_t
+ cpuset;
+
+ pid_t
+ tid;
+
+ struct test_pal_internal_thread_state
+ *its;
+
+ test_pal_thread_return_t
+ rv;
+
+ assert( thread_user_state != NULL );
+
+ /* TRD : the APIs under Linux/POSIX for setting thread affinity are in a mess
+ pthreads offers pthread_attr_setaffinity_np(), which glibc supports,
+ but which is not supported by Android
+ Linux offers sched_setaffinity(), but this needs a *thread pid*,
+             and the only API to get a thread pid is gettid(), which works for,
+             and only for, *the calling thread*
+
+ so we come to this - a wrapper thread function, which is the function used
+ when starting a thread; this calls gettid() and then sched_setaffinity(),
+ and then calls into the actual thread function
+
+ generally shaking my head in disbelief at this point
+ */
+
+ its = (struct test_pal_internal_thread_state *) thread_user_state;
+
+ CPU_ZERO( &cpuset );
+ CPU_SET( its->lp.logical_processor_number, &cpuset );
+
+ tid = syscall( SYS_gettid );
+
+ sched_setaffinity( tid, sizeof(cpu_set_t), &cpuset );
+
+ rv = its->thread_function( its->thread_user_state );
+
+ free( its );
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined __linux__ && _POSIX_THREADS > 0 )
+
+ /* TRD : !__linux__ indicates not Linux
+ _POSIX_THREADS indicates POSIX threads
+ - pthread_attr_init requires POSIX
+ - pthread_attr_setaffinity_np requires POSIX
+ - pthread_create requires POSIX
+ - pthread_attr_destroy requires POSIX
+ */
+
+ #ifdef TEST_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in test_porting_abstraction_layer_thread_start.c
+ #endif
+
+ #define TEST_PAL_THREAD_START
+
+ int test_pal_thread_start( test_pal_thread_state_t *thread_state,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state),
+ void *thread_user_state )
+ {
+ int
+ rv = 0,
+ rv_create;
+
+ cpu_set_t
+ cpuset;
+
+ pthread_attr_t
+ attr;
+
+ assert( thread_state != NULL );
+ assert( lp != NULL );
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ pthread_attr_init( &attr );
+
+ CPU_ZERO( &cpuset );
+ CPU_SET( lp->logical_processor_number, &cpuset );
+ pthread_attr_setaffinity_np( &attr, sizeof(cpuset), &cpuset );
+
+ rv_create = pthread_create( thread_state, &attr, thread_function, thread_user_state );
+
+ if( rv_create == 0 )
+ rv = 1;
+
+ pthread_attr_destroy( &attr );
+
+ return( rv );
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined TEST_PAL_THREAD_START )
+
+ #error test_pal_thread_start() not implemented for this platform.
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP )
+
+ /* TRD : _WIN32 indicates 32-bit or 64-bit Windows
+ NTDDI_VERSION indicates Windows version
+ - WaitForSingleObject requires XP
+ */
+
+ #ifdef TEST_PAL_THREAD_WAIT
+ #error More than one porting abstraction layer matches current platform in test_porting_abstraction_layer_thread_wait.c
+ #endif
+
+ #define TEST_PAL_THREAD_WAIT
+
+ void test_pal_thread_wait( test_pal_thread_state_t thread_state )
+ {
+ WaitForSingleObject( thread_state, INFINITE );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( _POSIX_THREADS > 0 )
+
+ /* TRD : POSIX threads
+
+ _POSIX_THREADS indicates POSIX threads
+ - pthread_join requires POSIX
+ */
+
+ #ifdef TEST_PAL_THREAD_WAIT
+ #error More than one porting abstraction layer matches current platform in test_porting_abstraction_layer_thread_wait.c
+ #endif
+
+ #define TEST_PAL_THREAD_WAIT
+
+ void test_pal_thread_wait( test_pal_thread_state_t thread_state )
+ {
+ pthread_join( thread_state, NULL );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined TEST_PAL_THREAD_WAIT )
+
+ #error test_pal_thread_wait() not implemented for this platform.
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_init( struct util_cmdline_state *cs )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ assert( cs != NULL );
+
+ for( loop = 0 ; loop < NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET ; loop++ )
+ {
+ cs->args[loop].arg_type = LIBCOMMON_CMDLINE_ARG_TYPE_UNSET;
+ cs->args[loop].processed_flag = LOWERED;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void util_cmdline_cleanup( struct util_cmdline_state *cs )
+{
+ assert( cs != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_add_arg( struct util_cmdline_state *cs, char arg_letter, enum util_cmdline_arg_type arg_type )
+{
+ lfds700_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( arg_letter >= 'a' and arg_letter <= 'z' );
+ // TRD : arg_type can be any value in its range
+
+ index = arg_letter - 'a';
+
+ cs->args[index].arg_type = arg_type;
+
+ if( arg_type == LIBCOMMON_CMDLINE_ARG_TYPE_FLAG )
+ cs->args[index].arg_data.flag.flag = LOWERED;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int util_cmdline_process_args( struct util_cmdline_state *cs, int argc, char **argv )
+{
+ char
+ *arg;
+
+ int
+ arg_letter,
+ cc,
+ loop,
+ rv = 1;
+
+ lfds700_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ for( loop = 1 ; loop < argc ; loop++ )
+ {
+ arg = *(argv+loop);
+
+ switch( *arg )
+ {
+ case '-':
+ arg_letter = tolower( *(arg+1) );
+
+ if( arg_letter >= 'a' and arg_letter <= 'z' )
+ {
+ index = arg_letter - 'a';
+
+ switch( cs->args[index].arg_type )
+ {
+ case LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER_RANGE:
+ if( loop+1 >= argc )
+ rv = 0;
+
+ if( loop+1 < argc )
+ {
+ cc = sscanf( *(argv+loop+1), "%llu-%llu", &cs->args[index].arg_data.integer_range.integer_start, &cs->args[index].arg_data.integer_range.integer_end );
+
+ if( cc != 2 )
+ rv = 0;
+
+ if( cc == 2 )
+ {
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ }
+ break;
+
+ case LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER:
+ if( loop+1 >= argc )
+ rv = 0;
+
+ if( loop+1 < argc )
+ {
+ cc = sscanf( *(argv+loop+1), "%llu", &cs->args[index].arg_data.integer.integer );
+
+ if( cc != 1 )
+ rv = 0;
+
+ if( cc == 1 )
+ {
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ }
+ break;
+
+ case LIBCOMMON_CMDLINE_ARG_TYPE_FLAG:
+ cs->args[index].arg_data.flag.flag = RAISED;
+ cs->args[index].processed_flag = RAISED;
+ break;
+
+ case LIBCOMMON_CMDLINE_ARG_TYPE_UNSET:
+ break;
+ }
+ }
+ break;
+
+ default:
+ rv = 0;
+ break;
+ }
+ }
+
+ return( rv );
+}
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_get_arg_data( struct util_cmdline_state *cs, char arg_letter, union util_cmdline_arg_data **arg_data )
+{
+ lfds700_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( arg_letter >= 'a' and arg_letter <= 'z' );
+ assert( arg_data != NULL );
+
+ index = arg_letter - 'a';
+
+ if( cs->args[index].processed_flag == RAISED )
+ *arg_data = &cs->args[index].arg_data;
+ else
+ *arg_data = NULL;
+
+ return;
+}
+
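+
+
+
+/****************************************************************************/
+/* TRD : illustrative sketch only - not part of the library
+         a hypothetical caller might drive this API as follows;
+         the "-n" integer argument and the "-m" flag below are made-up examples
+*/
+
+#ifdef UTIL_CMDLINE_USAGE_EXAMPLE
+
+  int util_cmdline_usage_example( int argc, char **argv )
+  {
+    int
+      rv;
+
+    struct util_cmdline_state
+      cs;
+
+    union util_cmdline_arg_data
+      *arg_data;
+
+    util_cmdline_init( &cs );
+
+    util_cmdline_add_arg( &cs, 'n', LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER );
+    util_cmdline_add_arg( &cs, 'm', LIBCOMMON_CMDLINE_ARG_TYPE_FLAG );
+
+    rv = util_cmdline_process_args( &cs, argc, argv );
+
+    if( rv == 1 )
+    {
+      // TRD : arg_data comes back NULL if "-n" did not appear on the command line
+      util_cmdline_get_arg_data( &cs, 'n', &arg_data );
+
+      if( arg_data != NULL )
+        printf( "n = %llu\n", arg_data->integer.integer );
+    }
+
+    util_cmdline_cleanup( &cs );
+
+    return( rv );
+  }
+
+#endif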
--- /dev/null
+/***** defines *****/
+#define NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET 26
+
+/***** enums *****/
+enum util_cmdline_arg_type
+{
+ LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER_RANGE,
+ LIBCOMMON_CMDLINE_ARG_TYPE_INTEGER,
+ LIBCOMMON_CMDLINE_ARG_TYPE_FLAG,
+ LIBCOMMON_CMDLINE_ARG_TYPE_UNSET
+};
+
+/***** structs *****/
+struct util_cmdline_arg_integer_range
+{
+ int long long unsigned
+ integer_start,
+ integer_end;
+};
+
+struct util_cmdline_arg_integer
+{
+ int long long unsigned
+ integer;
+};
+
+struct util_cmdline_arg_flag
+{
+ enum flag
+ flag;
+};
+
+union util_cmdline_arg_data
+{
+ struct util_cmdline_arg_integer_range
+ integer_range;
+
+ struct util_cmdline_arg_integer
+ integer;
+
+ struct util_cmdline_arg_flag
+ flag;
+};
+
+struct util_cmdline_arg_letter_and_data
+{
+ enum util_cmdline_arg_type
+ arg_type;
+
+ enum flag
+ processed_flag;
+
+ union util_cmdline_arg_data
+ arg_data;
+};
+
+struct util_cmdline_state
+{
+ struct util_cmdline_arg_letter_and_data
+ args[NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET];
+};
+
+/***** public prototypes *****/
+void util_cmdline_init( struct util_cmdline_state *cs );
+void util_cmdline_cleanup( struct util_cmdline_state *cs );
+void util_cmdline_add_arg( struct util_cmdline_state *cs, char arg_letter, enum util_cmdline_arg_type arg_type );
+int util_cmdline_process_args( struct util_cmdline_state *cs, int argc, char **argv );
+void util_cmdline_get_arg_data( struct util_cmdline_state *cs, char arg_letter, union util_cmdline_arg_data **arg_data );
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void *util_aligned_malloc( lfds700_pal_uint_t size, lfds700_pal_uint_t align_in_bytes )
+{
+ lfds700_pal_uint_t
+ offset;
+
+ void
+ *memory,
+ *original_memory;
+
+ // TRD : size can be any value in its range
+ // TRD : align_in_bytes can be any value in its range
+
+ /* TRD : helper function to provide aligned allocations
+ no porting required
+ */
+
+ original_memory = memory = util_malloc_wrapper( size + sizeof(void *) + align_in_bytes );
+
+ if( memory != NULL )
+ {
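+    /* TRD : we over-allocated by one pointer plus align_in_bytes;
+             step past the pointer slot, advance to the next alignment boundary,
+             and store the original malloc() pointer immediately below the
+             address we return, so util_aligned_free() can recover it
+    */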
+ memory = (void **) memory + 1;
+ offset = align_in_bytes - (lfds700_pal_uint_t) memory % align_in_bytes;
+ memory = (char unsigned *) memory + offset;
+ *( (void **) memory - 1 ) = original_memory;
+ }
+
+ return( memory );
+}
+
+
+
+
+
+/****************************************************************************/
+void util_aligned_free( void *memory )
+{
+ assert( memory != NULL );
+
+ // TRD : the "void *" stored above memory points to the root of the allocation
+ free( *( (void **) memory - 1 ) );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void *util_malloc_wrapper( lfds700_pal_uint_t size )
+{
+ void
+ *memory;
+
+ // TRD : size can be any value in its range
+
+ memory = malloc( size );
+
+ if( memory == NULL )
+ {
+ puts( "malloc() failed, exiting." );
+ exit( EXIT_FAILURE );
+ }
+
+ return( memory );
+}
+
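+
+
+
+/****************************************************************************/
+/* TRD : illustrative sketch only - not part of the library
+         allocating an element on an atomic-isolation boundary, as the test code
+         does elsewhere in this release, might look like this
+         ("struct example_element" is a made-up name)
+*/
+
+#ifdef UTIL_MEMORY_USAGE_EXAMPLE
+
+  struct example_element
+  {
+    lfds700_pal_uint_t
+      datum;
+  };
+
+  void util_memory_usage_example( void )
+  {
+    struct example_element
+      *ee;
+
+    // TRD : util_malloc_wrapper() exits the process on failure, so ee is never NULL here
+    ee = util_aligned_malloc( sizeof(struct example_element), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+    ee->datum = 0;
+
+    util_aligned_free( ee );
+
+    return;
+  }
+
+#endif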
--- /dev/null
+/***** public prototypes *****/
+void *util_aligned_malloc( lfds700_pal_uint_t size, lfds700_pal_uint_t align_in_bytes );
+void util_aligned_free( void *memory );
+void *util_malloc_wrapper( lfds700_pal_uint_t size );
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void util_thread_starter_new( struct util_thread_starter_state **tts, lfds700_pal_uint_t number_threads )
+{
+ lfds700_pal_uint_t
+ loop;
+
+ assert( tts != NULL );
+  // TRD : number_threads can be any value in its range
+
+ *tts = util_malloc_wrapper( sizeof(struct util_thread_starter_state) );
+
+ (*tts)->tsts = util_malloc_wrapper( sizeof(struct util_thread_starter_thread_state) * number_threads );
+ (*tts)->thread_start_flag = LOWERED;
+ (*tts)->number_thread_states = number_threads;
+
+ for( loop = 0 ; loop < number_threads ; loop++ )
+ {
+ ((*tts)->tsts+loop)->thread_ready_flag = LOWERED;
+ ((*tts)->tsts+loop)->thread_start_flag = &(*tts)->thread_start_flag;
+ }
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_thread_starter_start( struct util_thread_starter_state *tts,
+ test_pal_thread_state_t *thread_state,
+ lfds700_pal_uint_t thread_number,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)( void *thread_user_state ),
+ void *thread_user_state )
+{
+ assert( tts != NULL );
+ assert( thread_state != NULL );
+ assert( lp != NULL );
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ (tts->tsts+thread_number)->thread_user_state = thread_user_state;
+
+ util_thread_start_wrapper( thread_state, lp, thread_function, tts->tsts+thread_number );
+
+ // TRD : wait for the thread to indicate it is ready and waiting
+ while( (tts->tsts+thread_number)->thread_ready_flag == LOWERED );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_thread_starter_ready_and_wait( struct util_thread_starter_thread_state *tsts )
+{
+ assert( tsts != NULL );
+
+ tsts->thread_ready_flag = RAISED;
+
+ LFDS700_MISC_BARRIER_FULL;
+
+ // TRD : threads here are all looping, so we don't need to force a store
+
+ while( *tsts->thread_start_flag == LOWERED )
+ LFDS700_MISC_BARRIER_LOAD;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_thread_starter_run( struct util_thread_starter_state *tts )
+{
+ assert( tts != NULL );
+
+ /* TRD : all threads at this point are ready to go
+ as we wait for their ready flag immediately after their spawn
+ */
+
+ tts->thread_start_flag = RAISED;
+
+ LFDS700_MISC_BARRIER_STORE;
+
+ lfds700_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_thread_starter_delete( struct util_thread_starter_state *tts )
+{
+ assert( tts != NULL );
+
+ free( tts->tsts );
+
+ free( tts );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_thread_start_wrapper( test_pal_thread_state_t *thread_state,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state),
+ void *thread_user_state )
+{
+ int
+ rv;
+
+ assert( thread_state != NULL );
+ assert( lp != NULL );
+ assert( thread_function != NULL );
+ // TRD : thread_user_state can be NULL
+
+ rv = test_pal_thread_start( thread_state, lp, thread_function, thread_user_state );
+
+ if( rv == 0 )
+ {
+ puts( "test_pal_thread_start() failed." );
+ exit( EXIT_FAILURE );
+ }
+
+ return;
+}
+
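+
+
+
+/****************************************************************************/
+/* TRD : illustrative sketch only - not part of the library
+         the intended calling sequence is new -> start (once per thread) -> run -> wait -> delete,
+         with each thread calling util_thread_starter_ready_and_wait() on entry, so that all
+         threads are released at the same time; the names below ("example_thread_function" etc)
+         are made-up
+*/
+
+#ifdef UTIL_THREAD_STARTER_USAGE_EXAMPLE
+
+  static test_pal_thread_return_t TEST_PAL_CALLING_CONVENTION example_thread_function( void *util_thread_starter_thread_state )
+  {
+    struct util_thread_starter_thread_state
+      *tsts;
+
+    tsts = (struct util_thread_starter_thread_state *) util_thread_starter_thread_state;
+
+    // TRD : block until util_thread_starter_run() raises the shared start flag
+    util_thread_starter_ready_and_wait( tsts );
+
+    // TRD : the user state passed to util_thread_starter_start() is tsts->thread_user_state
+
+    return( (test_pal_thread_return_t) 0 );
+  }
+
+  void util_thread_starter_usage_example( struct test_pal_logical_processor *lp, test_pal_thread_state_t *thread_state )
+  {
+    struct util_thread_starter_state
+      *tts;
+
+    util_thread_starter_new( &tts, 1 );
+
+    util_thread_starter_start( tts, thread_state, 0, lp, example_thread_function, NULL );
+
+    util_thread_starter_run( tts );
+
+    test_pal_thread_wait( *thread_state );
+
+    util_thread_starter_delete( tts );
+
+    return;
+  }
+
+#endif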
--- /dev/null
+/***** structs *****/
+struct util_thread_starter_thread_state
+{
+  // TRD : must be volatile, or the compiler optimizes the polling loop down to a single load
+ enum flag volatile
+ thread_ready_flag,
+ *thread_start_flag;
+
+ void
+ *thread_user_state;
+};
+
+struct util_thread_starter_state
+{
+ enum flag volatile
+ thread_start_flag;
+
+ lfds700_pal_uint_t
+ number_thread_states;
+
+ struct util_thread_starter_thread_state
+ *tsts;
+};
+
+/***** prototypes *****/
+void util_thread_starter_new( struct util_thread_starter_state **tts, lfds700_pal_uint_t number_threads );
+void util_thread_starter_start( struct util_thread_starter_state *tts,
+ test_pal_thread_state_t *thread_state,
+ lfds700_pal_uint_t thread_number,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)( void *thread_user_state ),
+ void *thread_user_state );
+void util_thread_starter_ready_and_wait( struct util_thread_starter_thread_state *tsts );
+void util_thread_starter_run( struct util_thread_starter_state *tts );
+void util_thread_starter_delete( struct util_thread_starter_state *tts );
+
+void util_thread_start_wrapper( test_pal_thread_state_t *thread_state,
+ struct test_pal_logical_processor *lp,
+ test_pal_thread_return_t (TEST_PAL_CALLING_CONVENTION *thread_function)(void *thread_user_state),
+ void *thread_user_state );
+
--- /dev/null
+##### notes #####
+# TRD : -fno-strict-aliasing is needed because GCC has messed up type punning and __may_alias__ does absolutely nothing
+# -Wno-unused-but-set-variable and -Wno-uninitialized are needed because GCC seems confused by the atomic intrinsics
+# the code base for release has been compiled with those warnings enabled, to show any valid errors
+
+##### paths #####
+BINDIR := ../../bin
+INCDIR := ../../inc
+OBJDIR := ../../obj
+SRCDIR := ../../src
+INSINCDIR := /usr/local/include/
+INSLIBDIR := /usr/local/lib/
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+VERSION_NUMBER := 1
+MINOR_NUMBER := 0
+RELEASE_NUMBER := 0
+
+##### sources, objects and libraries #####
+BINNAME := liblfds710
+ARFILENAME := $(BINNAME).a
+ARPATHNAME := $(BINDIR)/$(ARFILENAME)
+SOBASENAME := $(BINNAME).so
+SONAME := $(SOBASENAME).$(VERSION_NUMBER)
+SOFILENAME := $(SONAME).$(MINOR_NUMBER).$(RELEASE_NUMBER)
+SOPATHNAME := $(BINDIR)/$(SOFILENAME)
+INCNAME := $(INCDIR)/$(BINNAME).h
+SRCDIRS := lfds710_btree_addonly_unbalanced lfds710_freelist lfds710_hash_addonly lfds710_list_addonly_singlylinked_ordered lfds710_list_addonly_singlylinked_unordered lfds710_misc lfds710_prng lfds710_queue_bounded_manyproducer_manyconsumer lfds710_queue_bounded_singleproducer_singleconsumer lfds710_queue_unbounded_manyproducer_manyconsumer lfds710_ringbuffer lfds710_stack
+SOURCES := lfds710_hash_addonly_cleanup.c lfds710_hash_addonly_get.c lfds710_hash_addonly_init.c lfds710_hash_addonly_insert.c lfds710_hash_addonly_iterate.c lfds710_hash_addonly_query.c \
+ lfds710_list_addonly_singlylinked_ordered_cleanup.c lfds710_list_addonly_singlylinked_ordered_get.c lfds710_list_addonly_singlylinked_ordered_init.c lfds710_list_addonly_singlylinked_ordered_insert.c lfds710_list_addonly_singlylinked_ordered_query.c \
+ lfds710_list_addonly_singlylinked_unordered_cleanup.c lfds710_list_addonly_singlylinked_unordered_get.c lfds710_list_addonly_singlylinked_unordered_init.c lfds710_list_addonly_singlylinked_unordered_insert.c lfds710_list_addonly_singlylinked_unordered_query.c \
+ lfds710_btree_addonly_unbalanced_cleanup.c lfds710_btree_addonly_unbalanced_get.c lfds710_btree_addonly_unbalanced_init.c lfds710_btree_addonly_unbalanced_insert.c lfds710_btree_addonly_unbalanced_query.c \
+ lfds710_freelist_cleanup.c lfds710_freelist_init.c lfds710_freelist_pop.c lfds710_freelist_push.c lfds710_freelist_query.c \
+ lfds710_misc_internal_backoff_init.c lfds710_misc_globals.c lfds710_misc_query.c \
+ lfds710_prng_init.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_cleanup.c lfds710_queue_bounded_manyproducer_manyconsumer_dequeue.c lfds710_queue_bounded_manyproducer_manyconsumer_enqueue.c lfds710_queue_bounded_manyproducer_manyconsumer_init.c lfds710_queue_bounded_manyproducer_manyconsumer_query.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_cleanup.c lfds710_queue_bounded_singleproducer_singleconsumer_dequeue.c lfds710_queue_bounded_singleproducer_singleconsumer_enqueue.c lfds710_queue_bounded_singleproducer_singleconsumer_init.c lfds710_queue_bounded_singleproducer_singleconsumer_query.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_cleanup.c lfds710_queue_unbounded_manyproducer_manyconsumer_dequeue.c lfds710_queue_unbounded_manyproducer_manyconsumer_enqueue.c lfds710_queue_unbounded_manyproducer_manyconsumer_init.c lfds710_queue_unbounded_manyproducer_manyconsumer_query.c \
+ lfds710_ringbuffer_cleanup.c lfds710_ringbuffer_init.c lfds710_ringbuffer_query.c lfds710_ringbuffer_read.c lfds710_ringbuffer_write.c \
+ lfds710_stack_cleanup.c lfds710_stack_init.c lfds710_stack_pop.c lfds710_stack_push.c lfds710_stack_query.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS := -lgcc
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL := -std=gnu89
+
+CC := gcc
+CFLAGS_MANDATORY := -c -fno-strict-aliasing
+CFLAGS_OPTIONAL := -ffreestanding -nostdinc -std=gnu89 -Wall -Werror -Wno-unknown-pragmas -Wno-unused-but-set-variable -Wno-uninitialized
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+AR := ar
+ARFLAGS :=
+ARFLAGS_MANDATORY := rcs
+ARFLAGS_OPTIONAL :=
+
+LD := gcc
+LDFLAGS_MANDATORY := -shared -Wl,-soname,$(SONAME) -o $(SOPATHNAME)
+LDFLAGS_OPTIONAL := -nodefaultlibs -nostdlib -std=gnu89 -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -fPIC
+
+##### build variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CFLAGS_MANDATORY += -fPIC
+endif
+
+# TRD : default to debug
+ifeq ($(MAKECMDGOALS),)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(ARPATHNAME) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(ARPATHNAME) $(OBJECTS)
+
+$(SOPATHNAME) : $(OBJECTS)
+	$(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) $(SYSLIBS)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SONAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SOBASENAME)
+
+##### phony #####
+.PHONY : clean ar_cov ar_dbg ar_prof ar_rel ar_tsan ar_vanilla ar_install ar_uninstall so_dbg so_prof so_rel so_tsan so_vanilla so_install so_uninstall
+
+clean :
+ @rm -f $(BINDIR)/* $(OBJDIR)/*
+
+ar_cov : $(ARPATHNAME) # archive (.a), coverage
+ar_dbg : $(ARPATHNAME) # archive (.a), debug
+ar_prof : $(ARPATHNAME) # archive (.a), profiling
+ar_rel : $(ARPATHNAME) # archive (.a), release
+ar_tsan : $(ARPATHNAME) # archive (.a), thread sanitizer
+ar_vanilla : $(ARPATHNAME) # archive (.a), no specific-build arguments
+ar_install :
+ # TRD : leading backslash to use command rather than alias
+ # as many Linux distros have a built-in alias to force
+ # a prompt ("y/n?") on file overwrite - silent and
+ # unexpected interference which breaks a makefile
+ @mkdir -p $(INSLIBDIR)
+ @\cp $(ARPATHNAME) $(INSLIBDIR)
+ @mkdir -p $(INSINCDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+ar_uninstall :
+ @rm $(INSLIBDIR)/$(ARFILENAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+# TRD : so_cov currently disabled as it cannot work with -nostdlib -nodefaultlibs
+# so_cov : $(SOPATHNAME) # shared (.so), coverage
+so_dbg : $(SOPATHNAME) # shared (.so), debug
+so_prof : $(SOPATHNAME) # shared (.so), profiling
+so_rel : $(SOPATHNAME) # shared (.so), release
+so_tsan : $(SOPATHNAME) # shared (.so), thread sanitizer
+so_vanilla : $(SOPATHNAME) # shared (.so), no specific-build arguments
+so_install :
+	@mkdir -p $(INSLIBDIR)
+	@\cp $(SOPATHNAME) $(INSLIBDIR)
+	@ldconfig -vn $(INSLIBDIR)
+	@ln -s $(SONAME) $(INSLIBDIR)/$(SOBASENAME)
+	@mkdir -p $(INSINCDIR)
+	@\cp -r $(INCDIR)/* $(INSINCDIR)
+so_uninstall :
+ @rm -f $(INSLIBDIR)/$(SOFILENAME)
+ @rm -f $(INSLIBDIR)/$(SOBASENAME)
+ @rm -f $(INSLIBDIR)/$(SONAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
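+##### example invocations #####
+# TRD : illustrative only; run GNUmake from this makefile's directory
+#       (pass the file with -f if it is not named "makefile"), e.g.
+#         make ar_rel          (static library, release build)
+#         make so_dbg          (shared library, debug build)
+#         sudo make ar_install
+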
+##### dependencies #####
+DEPENDS := $(patsubst %.o,%.d,$(OBJECTS))
+-include $(DEPENDS)
+
--- /dev/null
+lib-y :=
+
+lib-y += ../../src/lfds710_btree_addonly_unbalanced/lfds710_btree_addonly_unbalanced_cleanup.o
+lib-y += ../../src/lfds710_btree_addonly_unbalanced/lfds710_btree_addonly_unbalanced_get.o
+lib-y += ../../src/lfds710_btree_addonly_unbalanced/lfds710_btree_addonly_unbalanced_init.o
+lib-y += ../../src/lfds710_btree_addonly_unbalanced/lfds710_btree_addonly_unbalanced_insert.o
+lib-y += ../../src/lfds710_btree_addonly_unbalanced/lfds710_btree_addonly_unbalanced_query.o
+
+lib-y += ../../src/lfds710_freelist/lfds710_freelist_cleanup.o
+lib-y += ../../src/lfds710_freelist/lfds710_freelist_init.o
+lib-y += ../../src/lfds710_freelist/lfds710_freelist_pop.o
+lib-y += ../../src/lfds710_freelist/lfds710_freelist_push.o
+lib-y += ../../src/lfds710_freelist/lfds710_freelist_query.o
+
+lib-y += ../../src/lfds710_hash_addonly/lfds710_hash_addonly_cleanup.o
+lib-y += ../../src/lfds710_hash_addonly/lfds710_hash_addonly_get.o
+lib-y += ../../src/lfds710_hash_addonly/lfds710_hash_addonly_init.o
+lib-y += ../../src/lfds710_hash_addonly/lfds710_hash_addonly_insert.o
+lib-y += ../../src/lfds710_hash_addonly/lfds710_hash_addonly_iterate.o
+lib-y += ../../src/lfds710_hash_addonly/lfds710_hash_addonly_query.o
+
+lib-y += ../../src/lfds710_list_addonly_singlylinked_ordered/lfds710_list_addonly_singlylinked_ordered_cleanup.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_ordered/lfds710_list_addonly_singlylinked_ordered_get.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_ordered/lfds710_list_addonly_singlylinked_ordered_init.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_ordered/lfds710_list_addonly_singlylinked_ordered_insert.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_ordered/lfds710_list_addonly_singlylinked_ordered_query.o
+
+lib-y += ../../src/lfds710_list_addonly_singlylinked_unordered/lfds710_list_addonly_singlylinked_unordered_cleanup.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_unordered/lfds710_list_addonly_singlylinked_unordered_get.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_unordered/lfds710_list_addonly_singlylinked_unordered_init.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_unordered/lfds710_list_addonly_singlylinked_unordered_insert.o
+lib-y += ../../src/lfds710_list_addonly_singlylinked_unordered/lfds710_list_addonly_singlylinked_unordered_query.o
+
+lib-y += ../../src/lfds710_misc/lfds710_misc_internal_backoff_init.o
+lib-y += ../../src/lfds710_misc/lfds710_misc_globals.o
+lib-y += ../../src/lfds710_misc/lfds710_misc_query.o
+
+lib-y += ../../src/lfds710_prng/lfds710_prng_init.o
+
+lib-y += ../../src/lfds710_queue_bounded_manyproducer_manyconsumer/lfds710_queue_bounded_manyproducer_manyconsumer_cleanup.o
+lib-y += ../../src/lfds710_queue_bounded_manyproducer_manyconsumer/lfds710_queue_bounded_manyproducer_manyconsumer_dequeue.o
+lib-y += ../../src/lfds710_queue_bounded_manyproducer_manyconsumer/lfds710_queue_bounded_manyproducer_manyconsumer_enqueue.o
+lib-y += ../../src/lfds710_queue_bounded_manyproducer_manyconsumer/lfds710_queue_bounded_manyproducer_manyconsumer_init.o
+lib-y += ../../src/lfds710_queue_bounded_manyproducer_manyconsumer/lfds710_queue_bounded_manyproducer_manyconsumer_query.o
+
+lib-y += ../../src/lfds710_queue_bounded_singleproducer_singleconsumer/lfds710_queue_bounded_singleproducer_singleconsumer_cleanup.o
+lib-y += ../../src/lfds710_queue_bounded_singleproducer_singleconsumer/lfds710_queue_bounded_singleproducer_singleconsumer_dequeue.o
+lib-y += ../../src/lfds710_queue_bounded_singleproducer_singleconsumer/lfds710_queue_bounded_singleproducer_singleconsumer_enqueue.o
+lib-y += ../../src/lfds710_queue_bounded_singleproducer_singleconsumer/lfds710_queue_bounded_singleproducer_singleconsumer_init.o
+lib-y += ../../src/lfds710_queue_bounded_singleproducer_singleconsumer/lfds710_queue_bounded_singleproducer_singleconsumer_query.o
+
+lib-y += ../../src/lfds710_queue_unbounded_manyproducer_manyconsumer/lfds710_queue_unbounded_manyproducer_manyconsumer_cleanup.o
+lib-y += ../../src/lfds710_queue_unbounded_manyproducer_manyconsumer/lfds710_queue_unbounded_manyproducer_manyconsumer_dequeue.o
+lib-y += ../../src/lfds710_queue_unbounded_manyproducer_manyconsumer/lfds710_queue_unbounded_manyproducer_manyconsumer_enqueue.o
+lib-y += ../../src/lfds710_queue_unbounded_manyproducer_manyconsumer/lfds710_queue_unbounded_manyproducer_manyconsumer_init.o
+lib-y += ../../src/lfds710_queue_unbounded_manyproducer_manyconsumer/lfds710_queue_unbounded_manyproducer_manyconsumer_query.o
+
+lib-y += ../../src/lfds710_ringbuffer/lfds710_ringbuffer_cleanup.o
+lib-y += ../../src/lfds710_ringbuffer/lfds710_ringbuffer_init.o
+lib-y += ../../src/lfds710_ringbuffer/lfds710_ringbuffer_query.o
+lib-y += ../../src/lfds710_ringbuffer/lfds710_ringbuffer_read.o
+lib-y += ../../src/lfds710_ringbuffer/lfds710_ringbuffer_write.o
+
+lib-y += ../../src/lfds710_stack/lfds710_stack_cleanup.o
+lib-y += ../../src/lfds710_stack/lfds710_stack_init.o
+lib-y += ../../src/lfds710_stack/lfds710_stack_pop.o
+lib-y += ../../src/lfds710_stack/lfds710_stack_push.o
+lib-y += ../../src/lfds710_stack/lfds710_stack_query.o
+
+libs-y := ../../bin/
+
+ccflags-y := -I$(src)/../../inc
+ccflags-y += -I$(src)/../../inc/liblfds710
+ccflags-y += -DKERNEL_MODE
+ccflags-y += -DNDEBUG
+ccflags-y += -fno-strict-aliasing
+ccflags-y += -std=gnu89
+ccflags-y += -Wall
+ccflags-y += -Werror
+ccflags-y += -Wno-unknown-pragmas
+ccflags-y += -Wno-unused-but-set-variable
+ccflags-y += -Wno-uninitialized
+
+
+
+
--- /dev/null
+default:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD)
+
+clean:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) clean
+ find ../../src/ -name "*.o" -type f -delete
+
+help:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) help
+
+modules:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) modules
+
+
--- /dev/null
+EXPORTS
+
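+; TRD : entries are in "exportedname = internalname" form; each function is exported under its own, unchanged name
+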
+lfds710_btree_au_init_valid_on_current_logical_core = lfds710_btree_au_init_valid_on_current_logical_core
+lfds710_btree_au_cleanup = lfds710_btree_au_cleanup
+lfds710_btree_au_insert = lfds710_btree_au_insert
+lfds710_btree_au_get_by_key = lfds710_btree_au_get_by_key
+lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds710_btree_au_get_by_absolute_position = lfds710_btree_au_get_by_absolute_position
+lfds710_btree_au_get_by_relative_position = lfds710_btree_au_get_by_relative_position
+lfds710_btree_au_query = lfds710_btree_au_query
+
+lfds710_freelist_init_valid_on_current_logical_core = lfds710_freelist_init_valid_on_current_logical_core
+lfds710_freelist_cleanup = lfds710_freelist_cleanup
+lfds710_freelist_push = lfds710_freelist_push
+lfds710_freelist_pop = lfds710_freelist_pop
+lfds710_freelist_query = lfds710_freelist_query
+
+lfds710_hash_a_init_valid_on_current_logical_core = lfds710_hash_a_init_valid_on_current_logical_core
+lfds710_hash_a_cleanup = lfds710_hash_a_cleanup
+lfds710_hash_a_insert = lfds710_hash_a_insert
+lfds710_hash_a_get_by_key = lfds710_hash_a_get_by_key
+lfds710_hash_a_iterate_init = lfds710_hash_a_iterate_init
+lfds710_hash_a_iterate = lfds710_hash_a_iterate
+lfds710_hash_a_query = lfds710_hash_a_query
+
+lfds710_list_aso_init_valid_on_current_logical_core = lfds710_list_aso_init_valid_on_current_logical_core
+lfds710_list_aso_cleanup = lfds710_list_aso_cleanup
+lfds710_list_aso_insert = lfds710_list_aso_insert
+lfds710_list_aso_get_by_key = lfds710_list_aso_get_by_key
+lfds710_list_aso_query = lfds710_list_aso_query
+
+lfds710_list_asu_init_valid_on_current_logical_core = lfds710_list_asu_init_valid_on_current_logical_core
+lfds710_list_asu_cleanup = lfds710_list_asu_cleanup
+lfds710_list_asu_insert_at_position = lfds710_list_asu_insert_at_position
+lfds710_list_asu_insert_at_start = lfds710_list_asu_insert_at_start
+lfds710_list_asu_insert_at_end = lfds710_list_asu_insert_at_end
+lfds710_list_asu_insert_after_element = lfds710_list_asu_insert_after_element
+lfds710_list_asu_get_by_key = lfds710_list_asu_get_by_key
+lfds710_list_asu_query = lfds710_list_asu_query
+
+lfds710_misc_query = lfds710_misc_query
+
+lfds710_prng_init_valid_on_current_logical_core = lfds710_prng_init_valid_on_current_logical_core
+lfds710_prng_st_init = lfds710_prng_st_init
+
+lfds710_queue_bmm_init_valid_on_current_logical_core = lfds710_queue_bmm_init_valid_on_current_logical_core
+lfds710_queue_bmm_cleanup = lfds710_queue_bmm_cleanup
+lfds710_queue_bmm_enqueue = lfds710_queue_bmm_enqueue
+lfds710_queue_bmm_dequeue = lfds710_queue_bmm_dequeue
+lfds710_queue_bmm_query = lfds710_queue_bmm_query
+
+lfds710_queue_bss_init_valid_on_current_logical_core = lfds710_queue_bss_init_valid_on_current_logical_core
+lfds710_queue_bss_cleanup = lfds710_queue_bss_cleanup
+lfds710_queue_bss_enqueue = lfds710_queue_bss_enqueue
+lfds710_queue_bss_dequeue = lfds710_queue_bss_dequeue
+lfds710_queue_bss_query = lfds710_queue_bss_query
+
+lfds710_queue_umm_init_valid_on_current_logical_core = lfds710_queue_umm_init_valid_on_current_logical_core
+lfds710_queue_umm_cleanup = lfds710_queue_umm_cleanup
+lfds710_queue_umm_enqueue = lfds710_queue_umm_enqueue
+lfds710_queue_umm_dequeue = lfds710_queue_umm_dequeue
+lfds710_queue_umm_query = lfds710_queue_umm_query
+
+lfds710_ringbuffer_init_valid_on_current_logical_core = lfds710_ringbuffer_init_valid_on_current_logical_core
+lfds710_ringbuffer_cleanup = lfds710_ringbuffer_cleanup
+lfds710_ringbuffer_read = lfds710_ringbuffer_read
+lfds710_ringbuffer_write = lfds710_ringbuffer_write
+lfds710_ringbuffer_query = lfds710_ringbuffer_query
+
+lfds710_stack_init_valid_on_current_logical_core = lfds710_stack_init_valid_on_current_logical_core
+lfds710_stack_cleanup = lfds710_stack_cleanup
+lfds710_stack_push = lfds710_stack_push
+lfds710_stack_pop = lfds710_stack_pop
+lfds710_stack_query = lfds710_stack_query
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+INCDIR := ..\..\inc
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := liblfds710
+LIB_BINARY := $(BINDIR)\$(BINNAME).lib
+DLL_BINARY := $(BINDIR)\$(BINNAME).dll
+SRCDIRS := lfds710_btree_addonly_unbalanced lfds710_freelist lfds710_hash_addonly lfds710_list_addonly_singlylinked_ordered lfds710_list_addonly_singlylinked_unordered lfds710_misc lfds710_prng lfds710_queue_bounded_manyproducer_manyconsumer lfds710_queue_bounded_singleproducer_singleconsumer lfds710_queue_unbounded_manyproducer_manyconsumer lfds710_ringbuffer lfds710_stack
+SOURCES := lfds710_hash_addonly_cleanup.c lfds710_hash_addonly_get.c lfds710_hash_addonly_init.c lfds710_hash_addonly_insert.c lfds710_hash_addonly_iterate.c lfds710_hash_addonly_query.c \
+ lfds710_list_addonly_singlylinked_ordered_cleanup.c lfds710_list_addonly_singlylinked_ordered_get.c lfds710_list_addonly_singlylinked_ordered_init.c lfds710_list_addonly_singlylinked_ordered_insert.c lfds710_list_addonly_singlylinked_ordered_query.c \
+ lfds710_list_addonly_singlylinked_unordered_cleanup.c lfds710_list_addonly_singlylinked_unordered_get.c lfds710_list_addonly_singlylinked_unordered_init.c lfds710_list_addonly_singlylinked_unordered_insert.c lfds710_list_addonly_singlylinked_unordered_query.c \
+ lfds710_btree_addonly_unbalanced_cleanup.c lfds710_btree_addonly_unbalanced_get.c lfds710_btree_addonly_unbalanced_init.c lfds710_btree_addonly_unbalanced_insert.c lfds710_btree_addonly_unbalanced_query.c \
+ lfds710_freelist_cleanup.c lfds710_freelist_init.c lfds710_freelist_pop.c lfds710_freelist_push.c lfds710_freelist_query.c \
+ lfds710_misc_internal_backoff_init.c lfds710_misc_globals.c lfds710_misc_query.c \
+ lfds710_prng_init.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_cleanup.c lfds710_queue_bounded_manyproducer_manyconsumer_dequeue.c lfds710_queue_bounded_manyproducer_manyconsumer_enqueue.c lfds710_queue_bounded_manyproducer_manyconsumer_init.c lfds710_queue_bounded_manyproducer_manyconsumer_query.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_cleanup.c lfds710_queue_bounded_singleproducer_singleconsumer_dequeue.c lfds710_queue_bounded_singleproducer_singleconsumer_enqueue.c lfds710_queue_bounded_singleproducer_singleconsumer_init.c lfds710_queue_bounded_singleproducer_singleconsumer_query.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_cleanup.c lfds710_queue_unbounded_manyproducer_manyconsumer_dequeue.c lfds710_queue_unbounded_manyproducer_manyconsumer_enqueue.c lfds710_queue_unbounded_manyproducer_manyconsumer_init.c lfds710_queue_unbounded_manyproducer_manyconsumer_query.c \
+ lfds710_ringbuffer_cleanup.c lfds710_ringbuffer_init.c lfds710_ringbuffer_query.c lfds710_ringbuffer_read.c lfds710_ringbuffer_write.c \
+ lfds710_stack_cleanup.c lfds710_stack_init.c lfds710_stack_pop.c lfds710_stack_push.c lfds710_stack_query.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
+
+##### default paths fix up #####
+INCDIRS := $(patsubst %,%;,$(INCDIR))
+INCLUDE += $(subst $(SPACE),,$(INCDIRS))
+
+##### tools #####
+CC := cl
+CFLAGS_MANDATORY := /c "/Fd$(BINDIR)\$(BINNAME).pdb" /wd4068
+CFLAGS_OPTIONAL := /DWIN32_LEAN_AND_MEAN /DUNICODE /D_UNICODE /nologo /W4 /WX
+CFLAGS_MANDATORY_DBG := /Od /Gm /Zi /D_DEBUG
+CFLAGS_MANDATORY_REL := /Ox /DNDEBUG
+
+AR := lib
+ARFLAGS :=
+ARFLAGS_MANDATORY := /subsystem:console
+ARFLAGS_OPTIONAL := /nologo /wx /verbose
+
+LD := link
+LDFLAGS_MANDATORY := /def:$(BINNAME).def /dll /nodefaultlib /subsystem:console
+LDFLAGS_OPTIONAL := /nologo /nxcompat /wx
+LDFLAGS_MANDATORY_DBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+LDFLAGS_MANDATORY_REL := /incremental:no
+
+##### variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug lib
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := msvcrtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := msvcrt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(CLIB) $(SYSLIBS) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+dllrel : $(DLL_BINARY)
+dlldbg : $(DLL_BINARY)
+
+librel : $(LIB_BINARY)
+libdbg : $(LIB_BINARY)
+
+##### notes #####
+# /wd4068 : turn off "unknown pragma" warning
+
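+##### example invocations #####
+# TRD : illustrative only; run GNUmake on this file with cl, lib and link on the path
+#       (pass the file with -f if it is not the default makefile), e.g.
+#         make librel
+#         make dlldbg
+#         make clean
+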
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "liblfds710_internal.h"
+
+
+
+
+
+/****************************************************************************/
+DRIVER_INITIALIZE DriverEntry;
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return STATUS_SUCCESS;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+EXPORTS
+
+lfds710_btree_au_init_valid_on_current_logical_core = lfds710_btree_au_init_valid_on_current_logical_core
+lfds710_btree_au_cleanup = lfds710_btree_au_cleanup
+lfds710_btree_au_insert = lfds710_btree_au_insert
+lfds710_btree_au_get_by_key = lfds710_btree_au_get_by_key
+lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position = lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position
+lfds710_btree_au_get_by_absolute_position = lfds710_btree_au_get_by_absolute_position
+lfds710_btree_au_get_by_relative_position = lfds710_btree_au_get_by_relative_position
+lfds710_btree_au_query = lfds710_btree_au_query
+
+lfds710_freelist_init_valid_on_current_logical_core = lfds710_freelist_init_valid_on_current_logical_core
+lfds710_freelist_cleanup = lfds710_freelist_cleanup
+lfds710_freelist_push = lfds710_freelist_push
+lfds710_freelist_pop = lfds710_freelist_pop
+lfds710_freelist_query = lfds710_freelist_query
+
+lfds710_hash_a_init_valid_on_current_logical_core = lfds710_hash_a_init_valid_on_current_logical_core
+lfds710_hash_a_cleanup = lfds710_hash_a_cleanup
+lfds710_hash_a_insert = lfds710_hash_a_insert
+lfds710_hash_a_get_by_key = lfds710_hash_a_get_by_key
+lfds710_hash_a_iterate_init = lfds710_hash_a_iterate_init
+lfds710_hash_a_iterate = lfds710_hash_a_iterate
+lfds710_hash_a_query = lfds710_hash_a_query
+
+lfds710_list_aso_init_valid_on_current_logical_core = lfds710_list_aso_init_valid_on_current_logical_core
+lfds710_list_aso_cleanup = lfds710_list_aso_cleanup
+lfds710_list_aso_insert = lfds710_list_aso_insert
+lfds710_list_aso_get_by_key = lfds710_list_aso_get_by_key
+lfds710_list_aso_query = lfds710_list_aso_query
+
+lfds710_list_asu_init_valid_on_current_logical_core = lfds710_list_asu_init_valid_on_current_logical_core
+lfds710_list_asu_cleanup = lfds710_list_asu_cleanup
+lfds710_list_asu_insert_at_position = lfds710_list_asu_insert_at_position
+lfds710_list_asu_insert_at_start = lfds710_list_asu_insert_at_start
+lfds710_list_asu_insert_at_end = lfds710_list_asu_insert_at_end
+lfds710_list_asu_insert_after_element = lfds710_list_asu_insert_after_element
+lfds710_list_asu_get_by_key = lfds710_list_asu_get_by_key
+lfds710_list_asu_query = lfds710_list_asu_query
+
+lfds710_misc_query = lfds710_misc_query
+
+lfds710_prng_init_valid_on_current_logical_core = lfds710_prng_init_valid_on_current_logical_core
+lfds710_prng_st_init = lfds710_prng_st_init
+
+lfds710_queue_bmm_init_valid_on_current_logical_core = lfds710_queue_bmm_init_valid_on_current_logical_core
+lfds710_queue_bmm_cleanup = lfds710_queue_bmm_cleanup
+lfds710_queue_bmm_enqueue = lfds710_queue_bmm_enqueue
+lfds710_queue_bmm_dequeue = lfds710_queue_bmm_dequeue
+lfds710_queue_bmm_query = lfds710_queue_bmm_query
+
+lfds710_queue_bss_init_valid_on_current_logical_core = lfds710_queue_bss_init_valid_on_current_logical_core
+lfds710_queue_bss_cleanup = lfds710_queue_bss_cleanup
+lfds710_queue_bss_enqueue = lfds710_queue_bss_enqueue
+lfds710_queue_bss_dequeue = lfds710_queue_bss_dequeue
+lfds710_queue_bss_query = lfds710_queue_bss_query
+
+lfds710_queue_umm_init_valid_on_current_logical_core = lfds710_queue_umm_init_valid_on_current_logical_core
+lfds710_queue_umm_cleanup = lfds710_queue_umm_cleanup
+lfds710_queue_umm_enqueue = lfds710_queue_umm_enqueue
+lfds710_queue_umm_dequeue = lfds710_queue_umm_dequeue
+lfds710_queue_umm_query = lfds710_queue_umm_query
+
+lfds710_ringbuffer_init_valid_on_current_logical_core = lfds710_ringbuffer_init_valid_on_current_logical_core
+lfds710_ringbuffer_cleanup = lfds710_ringbuffer_cleanup
+lfds710_ringbuffer_read = lfds710_ringbuffer_read
+lfds710_ringbuffer_write = lfds710_ringbuffer_write
+lfds710_ringbuffer_query = lfds710_ringbuffer_query
+
+lfds710_stack_init_valid_on_current_logical_core = lfds710_stack_init_valid_on_current_logical_core
+lfds710_stack_cleanup = lfds710_stack_cleanup
+lfds710_stack_push = lfds710_stack_push
+lfds710_stack_pop = lfds710_stack_pop
+lfds710_stack_query = lfds710_stack_query
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!)
+
+To deal with these limitations, it is necessary for a Windows kernel
+build to run a batch file prior to building.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+They are both idempotent; you can run them as often as you like and
+switch between them whenever you want; each run takes you from whatever
+state you were previously in to the state you want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file will then copy "/sources.static" into
+"/src/single_dir_for_windows_kernel/", which will cause a static
+library to be built.
+
+The dynamic library batch file will then copy "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic
+library to be built. It will also copy "src/driver_entry.c" into
+"/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even
+though it's not used.
+
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\lfds710_btree_addonly_unbalanced\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_freelist\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_hash_addonly\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_list_addonly_singlylinked_ordered\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_list_addonly_singlylinked_unordered\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_prng\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_queue_bounded_manyproducer_manyconsumer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_queue_bounded_singleproducer_singleconsumer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_queue_unbounded_manyproducer_manyconsumer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_ringbuffer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_stack\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\liblfds710_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y driver_entry_renamed_to_avoid_compiler_warning.c single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+copy /y sources.dynamic single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\lfds710_btree_addonly_unbalanced\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_freelist\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_hash_addonly\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_list_addonly_singlylinked_ordered\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_list_addonly_singlylinked_unordered\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_prng\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_queue_bounded_manyproducer_manyconsumer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_queue_bounded_singleproducer_singleconsumer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_queue_unbounded_manyproducer_manyconsumer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_ringbuffer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\lfds710_stack\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\liblfds710_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y sources.static single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+DLLDEF = ../liblfds710.def
+TARGETNAME = liblfds710
+TARGETPATH = ../../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = lfds710_btree_addonly_unbalanced_cleanup.c \
+ lfds710_btree_addonly_unbalanced_get.c \
+ lfds710_btree_addonly_unbalanced_init.c \
+ lfds710_btree_addonly_unbalanced_insert.c \
+ lfds710_btree_addonly_unbalanced_query.c \
+ lfds710_freelist_cleanup.c \
+ lfds710_freelist_init.c \
+ lfds710_freelist_pop.c \
+ lfds710_freelist_push.c \
+ lfds710_freelist_query.c \
+ lfds710_hash_addonly_cleanup.c \
+ lfds710_hash_addonly_get.c \
+ lfds710_hash_addonly_init.c \
+ lfds710_hash_addonly_insert.c \
+ lfds710_hash_addonly_iterate.c \
+ lfds710_hash_addonly_query.c \
+ lfds710_list_addonly_singlylinked_ordered_cleanup.c \
+ lfds710_list_addonly_singlylinked_ordered_get.c \
+ lfds710_list_addonly_singlylinked_ordered_init.c \
+ lfds710_list_addonly_singlylinked_ordered_insert.c \
+ lfds710_list_addonly_singlylinked_ordered_query.c \
+ lfds710_list_addonly_singlylinked_unordered_cleanup.c \
+ lfds710_list_addonly_singlylinked_unordered_get.c \
+ lfds710_list_addonly_singlylinked_unordered_init.c \
+ lfds710_list_addonly_singlylinked_unordered_insert.c \
+ lfds710_list_addonly_singlylinked_unordered_query.c \
+ lfds710_misc_globals.c \
+ lfds710_misc_internal_backoff_init.c \
+ lfds710_misc_query.c \
+ lfds710_prng_init.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_cleanup.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_dequeue.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_enqueue.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_init.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_query.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_cleanup.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_dequeue.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_enqueue.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_init.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_query.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_cleanup.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_dequeue.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_enqueue.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_init.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_query.c \
+ lfds710_ringbuffer_cleanup.c \
+ lfds710_ringbuffer_init.c \
+ lfds710_ringbuffer_query.c \
+ lfds710_ringbuffer_read.c \
+ lfds710_ringbuffer_write.c \
+ lfds710_stack_cleanup.c \
+ lfds710_stack_init.c \
+ lfds710_stack_pop.c \
+ lfds710_stack_push.c \
+ lfds710_stack_query.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+TARGETNAME = liblfds710
+TARGETPATH = ../../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = lfds710_btree_addonly_unbalanced_cleanup.c \
+ lfds710_btree_addonly_unbalanced_get.c \
+ lfds710_btree_addonly_unbalanced_init.c \
+ lfds710_btree_addonly_unbalanced_insert.c \
+ lfds710_btree_addonly_unbalanced_query.c \
+ lfds710_freelist_cleanup.c \
+ lfds710_freelist_init.c \
+ lfds710_freelist_pop.c \
+ lfds710_freelist_push.c \
+ lfds710_freelist_query.c \
+ lfds710_hash_addonly_cleanup.c \
+ lfds710_hash_addonly_get.c \
+ lfds710_hash_addonly_init.c \
+ lfds710_hash_addonly_insert.c \
+ lfds710_hash_addonly_iterate.c \
+ lfds710_hash_addonly_query.c \
+ lfds710_list_addonly_singlylinked_ordered_cleanup.c \
+ lfds710_list_addonly_singlylinked_ordered_get.c \
+ lfds710_list_addonly_singlylinked_ordered_init.c \
+ lfds710_list_addonly_singlylinked_ordered_insert.c \
+ lfds710_list_addonly_singlylinked_ordered_query.c \
+ lfds710_list_addonly_singlylinked_unordered_cleanup.c \
+ lfds710_list_addonly_singlylinked_unordered_get.c \
+ lfds710_list_addonly_singlylinked_unordered_init.c \
+ lfds710_list_addonly_singlylinked_unordered_insert.c \
+ lfds710_list_addonly_singlylinked_unordered_query.c \
+ lfds710_misc_globals.c \
+ lfds710_misc_internal_backoff_init.c \
+ lfds710_misc_query.c \
+ lfds710_prng_init.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_cleanup.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_dequeue.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_enqueue.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_init.c \
+ lfds710_queue_bounded_manyproducer_manyconsumer_query.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_cleanup.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_dequeue.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_enqueue.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_init.c \
+ lfds710_queue_bounded_singleproducer_singleconsumer_query.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_cleanup.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_dequeue.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_enqueue.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_init.c \
+ lfds710_queue_unbounded_manyproducer_manyconsumer_query.c \
+ lfds710_ringbuffer_cleanup.c \
+ lfds710_ringbuffer_init.c \
+ lfds710_ringbuffer_query.c \
+ lfds710_ringbuffer_read.c \
+ lfds710_ringbuffer_write.c \
+ lfds710_stack_cleanup.c \
+ lfds710_stack_init.c \
+ lfds710_stack_pop.c \
+ lfds710_stack_push.c \
+ lfds710_stack_query.c
+
--- /dev/null
+#ifndef LIBLFDS710_H
+
+ /***** defines *****/
+ #define LIBLFDS710_H
+
+ /***** pragmas on *****/
+ #pragma warning( push )
+ #pragma warning( disable : 4324 ) // TRD : 4324 is the MSVC warning for structure padding introduced by alignment specifiers
+ #pragma prefast( disable : 28113 28182 28183, "blah" )
+
+ /***** includes *****/
+ #include "liblfds710/lfds710_porting_abstraction_layer_compiler.h"
+ #include "liblfds710/lfds710_porting_abstraction_layer_operating_system.h"
+ #include "liblfds710/lfds710_porting_abstraction_layer_processor.h"
+
+ #include "liblfds710/lfds710_prng.h" // TRD : misc requires prng
+ #include "liblfds710/lfds710_misc.h" // TRD : everything after depends on misc
+ #include "liblfds710/lfds710_btree_addonly_unbalanced.h" // TRD : hash_addonly depends on btree_addonly_unbalanced
+ #include "liblfds710/lfds710_freelist.h"
+ #include "liblfds710/lfds710_hash_addonly.h"
+ #include "liblfds710/lfds710_list_addonly_singlylinked_ordered.h"
+ #include "liblfds710/lfds710_list_addonly_singlylinked_unordered.h"
+ #include "liblfds710/lfds710_queue_bounded_manyproducer_manyconsumer.h"
+ #include "liblfds710/lfds710_queue_bounded_singleproducer_singleconsumer.h"
+ #include "liblfds710/lfds710_queue_unbounded_manyproducer_manyconsumer.h"
+ #include "liblfds710/lfds710_ringbuffer.h"
+ #include "liblfds710/lfds710_stack.h"
+
+ /***** pragmas off *****/
+ #pragma warning( pop )
+
+#endif
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( btree_au_element ) ( (btree_au_element).key )
+#define LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( btree_au_element ) ( LFDS710_MISC_BARRIER_LOAD, (btree_au_element).value )
+#define LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( btree_au_element, new_value ) { LFDS710_PAL_ATOMIC_SET( &(btree_au_element).value, new_value ); }
+#define LFDS710_BTREE_AU_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum lfds710_btree_au_absolute_position
+{
+ LFDS710_BTREE_AU_ABSOLUTE_POSITION_ROOT,
+ LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum lfds710_btree_au_existing_key
+{
+ LFDS710_BTREE_AU_EXISTING_KEY_OVERWRITE,
+ LFDS710_BTREE_AU_EXISTING_KEY_FAIL
+};
+
+enum lfds710_btree_au_insert_result
+{
+ LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS
+};
+
+enum lfds710_btree_au_query
+{
+ LFDS710_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS710_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE
+};
+
+enum lfds710_btree_au_relative_position
+{
+ LFDS710_BTREE_AU_RELATIVE_POSITION_UP,
+ LFDS710_BTREE_AU_RELATIVE_POSITION_LEFT,
+ LFDS710_BTREE_AU_RELATIVE_POSITION_RIGHT,
+ LFDS710_BTREE_AU_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LFDS710_BTREE_AU_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct lfds710_btree_au_element
+{
+ /* TRD : we are add-only, so these elements are only written once
+ as such, the write is wholly negligible
+ we are only concerned with getting as many structs in one cache line as we can
+ */
+
+ struct lfds710_btree_au_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile left,
+ *volatile right,
+ *volatile up;
+
+ void LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile value;
+
+ void
+ *key;
+};
+
+struct lfds710_btree_au_state
+{
+ struct lfds710_btree_au_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile root;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum lfds710_btree_au_existing_key
+ existing_key;
+
+ void
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ insert_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_btree_au_init_valid_on_current_logical_core( struct lfds710_btree_au_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds710_btree_au_existing_key existing_key,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_btree_au_cleanup( struct lfds710_btree_au_state *baus,
+ void (*element_cleanup_callback)(struct lfds710_btree_au_state *baus, struct lfds710_btree_au_element *baue) );
+
+enum lfds710_btree_au_insert_result lfds710_btree_au_insert( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element *baue,
+ struct lfds710_btree_au_element **existing_baue );
+ // TRD : if the insert collides with an existing key and existing_baue is non-NULL, existing_baue is set to the element holding the existing key
+
+int lfds710_btree_au_get_by_key( struct lfds710_btree_au_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct lfds710_btree_au_element **baue );
+
+int lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element **baue,
+ enum lfds710_btree_au_absolute_position absolute_position,
+ enum lfds710_btree_au_relative_position relative_position );
+ // TRD : if *baue is NULL, we get the element at absolute_position, otherwise we move from *baue according to relative_position
+
+int lfds710_btree_au_get_by_absolute_position( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element **baue,
+ enum lfds710_btree_au_absolute_position absolute_position );
+
+int lfds710_btree_au_get_by_relative_position( struct lfds710_btree_au_element **baue,
+ enum lfds710_btree_au_relative_position relative_position );
+
+void lfds710_btree_au_query( struct lfds710_btree_au_state *baus,
+ enum lfds710_btree_au_query query_type,
+ void *query_input,
+ void *query_output );
+
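+/* TRD : a minimal single-threaded usage sketch of the prototypes above; the compare
+         callback, the element/state instances and user_data are illustrative user
+         code, not part of the library
+
+           static int key_compare_function( void const *new_key, void const *existing_key )
+           {
+             lfds710_pal_uint_t
+               nk = (lfds710_pal_uint_t) new_key,
+               ek = (lfds710_pal_uint_t) existing_key;
+
+             return( (nk > ek) - (nk < ek) );
+           }
+
+           struct lfds710_btree_au_state
+             baus;
+
+           struct lfds710_btree_au_element
+             baue,
+             *found_baue = NULL;
+
+           int
+             user_data = 42;
+
+           void
+             *value;
+
+           lfds710_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+           LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( baue, 5 );
+           LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( baue, &user_data );
+
+           lfds710_btree_au_insert( &baus, &baue, NULL );
+
+           if( lfds710_btree_au_get_by_key(&baus, key_compare_function, (void *) (lfds710_pal_uint_t) 5, &found_baue) )
+             value = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *found_baue );
+*/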
--- /dev/null
+/***** defines *****/
+#define LFDS710_FREELIST_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LFDS710_FREELIST_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LFDS710_FREELIST_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+#define LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS ( LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES / sizeof(struct lfds710_freelist_element *) )
+
+/***** enums *****/
+enum lfds710_freelist_query
+{
+ LFDS710_FREELIST_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE,
+ LFDS710_FREELIST_QUERY_GET_ELIMINATION_ARRAY_EXTRA_ELEMENTS_IN_FREELIST_ELEMENTS
+};
+
+/***** structures *****/
+struct lfds710_freelist_element
+{
+ struct lfds710_freelist_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds710_freelist_state
+{
+ struct lfds710_freelist_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile top[PAC_SIZE];
+
+ lfds710_pal_uint_t LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ elimination_array_size_in_elements;
+
+ struct lfds710_freelist_element * volatile
+ (*elimination_array)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+ void
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ pop_backoff,
+ push_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_freelist_init_valid_on_current_logical_core( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element * volatile (*elimination_array)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS],
+ lfds710_pal_uint_t elimination_array_size_in_elements,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_freelist_cleanup( struct lfds710_freelist_state *fs,
+ void (*element_cleanup_callback)(struct lfds710_freelist_state *fs, struct lfds710_freelist_element *fe) );
+
+void lfds710_freelist_push( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element *fe,
+ struct lfds710_prng_st_state *psts );
+
+int lfds710_freelist_pop( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element **fe,
+ struct lfds710_prng_st_state *psts );
+
+void lfds710_freelist_query( struct lfds710_freelist_state *fs,
+ enum lfds710_freelist_query query_type,
+ void *query_input,
+ void *query_output );
+
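+/* TRD : a minimal single-threaded usage sketch of the prototypes above; the variable
+         names are illustrative user code - the elimination array size used here (4)
+         is arbitrary for the sketch, and passing NULL for the prng state to push and
+         pop is assumed to be acceptable
+
+           static struct lfds710_freelist_element * volatile
+             LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+             elimination_array[4][LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+           struct lfds710_freelist_state
+             fs;
+
+           struct lfds710_freelist_element
+             fe,
+             *popped_fe;
+
+           int
+             user_data = 42;
+
+           void
+             *value;
+
+           lfds710_freelist_init_valid_on_current_logical_core( &fs, elimination_array, 4, NULL );
+
+           LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( fe, &user_data );
+           lfds710_freelist_push( &fs, &fe, NULL );
+
+           if( lfds710_freelist_pop(&fs, &popped_fe, NULL) )
+             value = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *popped_fe );
+*/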
--- /dev/null
+/***** defines *****/
+#define LFDS710_HASH_A_GET_KEY_FROM_ELEMENT( hash_a_element ) ( (hash_a_element).key )
+#define LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hash_a_element, new_key ) ( (hash_a_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_HASH_A_GET_VALUE_FROM_ELEMENT( hash_a_element ) ( LFDS710_MISC_BARRIER_LOAD, (hash_a_element).value )
+#define LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hash_a_element, new_value ) { LFDS710_PAL_ATOMIC_SET( &(hash_a_element).value, new_value ); }
+#define LFDS710_HASH_A_GET_USER_STATE_FROM_STATE( hash_a_state ) ( (hash_a_state).user_state )
+
+// TRD : a quality hash function, provided for user convenience - note the user must initialize hash to 0 before the first call; a usage sketch is given at the end of this header
+
+#if( LFDS710_PAL_ALIGN_SINGLE_POINTER == 4 )
+ // TRD : void *data, lfds710_pal_uint_t data_length_in_bytes, lfds710_pal_uint_t hash
+ #define LFDS710_HASH_A_HASH_FUNCTION( data, data_length_in_bytes, hash ) { \
+ lfds710_pal_uint_t \
+ loop; \
+ \
+ for( loop = 0 ; loop < (data_length_in_bytes) ; loop++ ) \
+ { \
+ (hash) += *( (char unsigned *) (data) + loop ); \
+ (hash) = ((hash) ^ ((hash) >> 16)) * 0x85ebca6bUL; \
+ (hash) = ((hash) ^ ((hash) >> 13)) * 0xc2b2ae35UL; \
+ (hash) = (hash ^ (hash >> 16)); \
+ } \
+ }
+#endif
+
+#if( LFDS710_PAL_ALIGN_SINGLE_POINTER == 8 )
+ // TRD : void *data, lfds710_pal_uint_t data_length_in_bytes, lfds710_pal_uint_t hash
+ #define LFDS710_HASH_A_HASH_FUNCTION( data, data_length_in_bytes, hash ) { \
+ lfds710_pal_uint_t \
+ loop; \
+ \
+ for( loop = 0 ; loop < (data_length_in_bytes) ; loop++ ) \
+ { \
+ (hash) += *( (char unsigned *) (data) + loop ); \
+ (hash) = ((hash) ^ ((hash) >> 30)) * 0xBF58476D1CE4E5B9ULL; \
+ (hash) = ((hash) ^ ((hash) >> 27)) * 0x94D049BB133111EBULL; \
+ (hash) = (hash ^ (hash >> 31)); \
+ } \
+ }
+#endif
+
+/***** enums *****/
+enum lfds710_hash_a_existing_key
+{
+ LFDS710_HASH_A_EXISTING_KEY_OVERWRITE,
+ LFDS710_HASH_A_EXISTING_KEY_FAIL
+};
+
+enum lfds710_hash_a_insert_result
+{
+ LFDS710_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY,
+ LFDS710_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE,
+ LFDS710_HASH_A_PUT_RESULT_SUCCESS
+};
+
+enum lfds710_hash_a_query
+{
+ LFDS710_HASH_A_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS710_HASH_A_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structs *****/
+struct lfds710_hash_a_element
+{
+ struct lfds710_btree_au_element
+ baue;
+
+ void
+ *key;
+
+ void LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile value;
+};
+
+struct lfds710_hash_a_iterate
+{
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct lfds710_btree_au_state
+ *baus,
+ *baus_end;
+};
+
+struct lfds710_hash_a_state
+{
+ enum lfds710_hash_a_existing_key
+ existing_key;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ lfds710_pal_uint_t
+ array_size;
+
+ struct lfds710_btree_au_state
+ *baus_array;
+
+ void
+ (*element_cleanup_callback)( struct lfds710_hash_a_state *has, struct lfds710_hash_a_element *hae ),
+ (*key_hash_function)( void const *key, lfds710_pal_uint_t *hash ),
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds710_hash_a_init_valid_on_current_logical_core( struct lfds710_hash_a_state *has,
+ struct lfds710_btree_au_state *baus_array,
+ lfds710_pal_uint_t array_size,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void (*key_hash_function)(void const *key, lfds710_pal_uint_t *hash),
+ enum lfds710_hash_a_existing_key existing_key,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_hash_a_cleanup( struct lfds710_hash_a_state *has,
+ void (*element_cleanup_function)(struct lfds710_hash_a_state *has, struct lfds710_hash_a_element *hae) );
+
+enum lfds710_hash_a_insert_result lfds710_hash_a_insert( struct lfds710_hash_a_state *has,
+ struct lfds710_hash_a_element *hae,
+ struct lfds710_hash_a_element **existing_hae );
+ // TRD : if existing_hae is not NULL and the key already exists, *existing_hae is set to the hash element holding the existing key
+
+int lfds710_hash_a_get_by_key( struct lfds710_hash_a_state *has,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void (*key_hash_function)(void const *key, lfds710_pal_uint_t *hash),
+ void *key,
+ struct lfds710_hash_a_element **hae );
+
+void lfds710_hash_a_iterate_init( struct lfds710_hash_a_state *has, struct lfds710_hash_a_iterate *hai );
+int lfds710_hash_a_iterate( struct lfds710_hash_a_iterate *hai, struct lfds710_hash_a_element **hae );
+
+void lfds710_hash_a_query( struct lfds710_hash_a_state *has,
+ enum lfds710_hash_a_query query_type,
+ void *query_input,
+ void *query_output );
+
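+/* TRD : a minimal single-threaded usage sketch of the prototypes above, using the
+         LFDS710_HASH_A_HASH_FUNCTION macro from this header; the callbacks, the array
+         size and the variable names are illustrative user code - keys here are small
+         integers stored directly in the key pointer
+
+           static int key_compare_function( void const *new_key, void const *existing_key )
+           {
+             lfds710_pal_uint_t
+               nk = (lfds710_pal_uint_t) new_key,
+               ek = (lfds710_pal_uint_t) existing_key;
+
+             return( (nk > ek) - (nk < ek) );
+           }
+
+           static void key_hash_function( void const *key, lfds710_pal_uint_t *hash )
+           {
+             *hash = 0;
+             LFDS710_HASH_A_HASH_FUNCTION( &key, sizeof(void *), *hash );
+           }
+
+           static struct lfds710_btree_au_state
+             baus_array[16];
+
+           struct lfds710_hash_a_state
+             has;
+
+           struct lfds710_hash_a_element
+             hae,
+             *found_hae;
+
+           int
+             user_data = 42;
+
+           void
+             *value;
+
+           lfds710_hash_a_init_valid_on_current_logical_core( &has, baus_array, 16, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+           LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hae, 5 );
+           LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hae, &user_data );
+           lfds710_hash_a_insert( &has, &hae, NULL );
+
+           if( lfds710_hash_a_get_by_key(&has, key_compare_function, key_hash_function, (void *) (lfds710_pal_uint_t) 5, &found_hae) )
+             value = LFDS710_HASH_A_GET_VALUE_FROM_ELEMENT( *found_hae );
+*/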
--- /dev/null
+/***** defines *****/
+#define LFDS710_LIST_ASO_GET_START( list_aso_state ) ( LFDS710_MISC_BARRIER_LOAD, (list_aso_state).start->next )
+#define LFDS710_LIST_ASO_GET_NEXT( list_aso_element ) ( LFDS710_MISC_BARRIER_LOAD, (list_aso_element).next )
+#define LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT( list_aso_state, pointer_to_list_aso_element ) ( (pointer_to_list_aso_element) == NULL ? ( (pointer_to_list_aso_element) = LFDS710_LIST_ASO_GET_START(list_aso_state) ) : ( (pointer_to_list_aso_element) = LFDS710_LIST_ASO_GET_NEXT(*(pointer_to_list_aso_element)) ) )
+#define LFDS710_LIST_ASO_GET_KEY_FROM_ELEMENT( list_aso_element ) ( (list_aso_element).key )
+#define LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( list_aso_element, new_key ) ( (list_aso_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( list_aso_element ) ( LFDS710_MISC_BARRIER_LOAD, (list_aso_element).value )
+#define LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( list_aso_element, new_value ) { LFDS710_PAL_ATOMIC_SET( &(list_aso_element).value, new_value ); }
+#define LFDS710_LIST_ASO_GET_USER_STATE_FROM_STATE( list_aso_state ) ( (list_aso_state).user_state )
+
+/***** enums *****/
+enum lfds710_list_aso_existing_key
+{
+ LFDS710_LIST_ASO_EXISTING_KEY_OVERWRITE,
+ LFDS710_LIST_ASO_EXISTING_KEY_FAIL
+};
+
+enum lfds710_list_aso_insert_result
+{
+ LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LFDS710_LIST_ASO_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LFDS710_LIST_ASO_INSERT_RESULT_SUCCESS
+};
+
+enum lfds710_list_aso_query
+{
+ LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS710_LIST_ASO_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_list_aso_element
+{
+ struct lfds710_list_aso_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile next;
+
+ void LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile value;
+
+ void
+ *key;
+};
+
+struct lfds710_list_aso_state
+{
+ struct lfds710_list_aso_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ dummy_element;
+
+ struct lfds710_list_aso_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *start;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum lfds710_list_aso_existing_key
+ existing_key;
+
+ void
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ insert_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_list_aso_init_valid_on_current_logical_core( struct lfds710_list_aso_state *lasos,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds710_list_aso_existing_key existing_key,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_list_aso_cleanup( struct lfds710_list_aso_state *lasos,
+ void (*element_cleanup_callback)(struct lfds710_list_aso_state *lasos, struct lfds710_list_aso_element *lasoe) );
+
+enum lfds710_list_aso_insert_result lfds710_list_aso_insert( struct lfds710_list_aso_state *lasos,
+ struct lfds710_list_aso_element *lasoe,
+ struct lfds710_list_aso_element **existing_lasoe );
+
+int lfds710_list_aso_get_by_key( struct lfds710_list_aso_state *lasos,
+ void *key,
+ struct lfds710_list_aso_element **lasoe );
+
+void lfds710_list_aso_query( struct lfds710_list_aso_state *lasos,
+ enum lfds710_list_aso_query query_type,
+ void *query_input,
+ void *query_output );
+
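+/* TRD : a minimal single-threaded usage sketch of the prototypes above; the variable
+         names are illustrative user code, and key_compare_function is a user callback
+         of the same form as in the btree_au usage sketch
+
+           struct lfds710_list_aso_state
+             lasos;
+
+           struct lfds710_list_aso_element
+             lasoe,
+             *iterator = NULL;
+
+           int
+             user_data = 42;
+
+           void
+             *value;
+
+           lfds710_list_aso_init_valid_on_current_logical_core( &lasos, key_compare_function, LFDS710_LIST_ASO_EXISTING_KEY_FAIL, NULL );
+
+           LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( lasoe, 5 );
+           LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( lasoe, &user_data );
+           lfds710_list_aso_insert( &lasos, &lasoe, NULL );
+
+           // walk the list in key order; the macro evaluates to NULL at the end of the list
+           while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(lasos, iterator) )
+             value = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *iterator );
+*/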
--- /dev/null
+/***** defines *****/
+#define LFDS710_LIST_ASU_GET_START( list_asu_state ) ( LFDS710_MISC_BARRIER_LOAD, (list_asu_state).start->next )
+#define LFDS710_LIST_ASU_GET_NEXT( list_asu_element ) ( LFDS710_MISC_BARRIER_LOAD, (list_asu_element).next )
+#define LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT( list_asu_state, pointer_to_list_asu_element ) ( (pointer_to_list_asu_element) == NULL ? ( (pointer_to_list_asu_element) = LFDS710_LIST_ASU_GET_START(list_asu_state) ) : ( (pointer_to_list_asu_element) = LFDS710_LIST_ASU_GET_NEXT(*(pointer_to_list_asu_element)) ) )
+#define LFDS710_LIST_ASU_GET_KEY_FROM_ELEMENT( list_asu_element ) ( (list_asu_element).key )
+#define LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( list_asu_element, new_key ) ( (list_asu_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( list_asu_element ) ( LFDS710_MISC_BARRIER_LOAD, (list_asu_element).value )
+#define LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( list_asu_element, new_value ) { LFDS710_PAL_ATOMIC_SET( &(list_asu_element).value, new_value ); }
+#define LFDS710_LIST_ASU_GET_USER_STATE_FROM_STATE( list_asu_state ) ( (list_asu_state).user_state )
+
+/***** enums *****/
+enum lfds710_list_asu_position
+{
+ LFDS710_LIST_ASU_POSITION_START,
+ LFDS710_LIST_ASU_POSITION_END,
+ LFDS710_LIST_ASU_POSITION_AFTER
+};
+
+enum lfds710_list_asu_query
+{
+ LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS710_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_list_asu_element
+{
+ struct lfds710_list_asu_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile next;
+
+ void LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *volatile value;
+
+ void
+ *key;
+};
+
+struct lfds710_list_asu_state
+{
+ struct lfds710_list_asu_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ dummy_element;
+
+ struct lfds710_list_asu_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile end;
+
+ struct lfds710_list_asu_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *start;
+
+ void
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ after_backoff,
+ end_backoff,
+ start_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_list_asu_init_valid_on_current_logical_core( struct lfds710_list_asu_state *lasus,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_list_asu_cleanup( struct lfds710_list_asu_state *lasus,
+ void (*element_cleanup_callback)(struct lfds710_list_asu_state *lasus, struct lfds710_list_asu_element *lasue) );
+
+void lfds710_list_asu_insert_at_position( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue,
+ struct lfds710_list_asu_element *lasue_predecessor,
+ enum lfds710_list_asu_position position );
+
+void lfds710_list_asu_insert_at_start( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue );
+
+void lfds710_list_asu_insert_at_end( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue );
+
+void lfds710_list_asu_insert_after_element( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue,
+ struct lfds710_list_asu_element *lasue_predecessor );
+
+int lfds710_list_asu_get_by_key( struct lfds710_list_asu_state *lasus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct lfds710_list_asu_element **lasue );
+
+void lfds710_list_asu_query( struct lfds710_list_asu_state *lasus,
+ enum lfds710_list_asu_query query_type,
+ void *query_input,
+ void *query_output );
+
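+/* TRD : a minimal single-threaded usage sketch of the prototypes above; the variable
+         names are illustrative user code
+
+           struct lfds710_list_asu_state
+             lasus;
+
+           struct lfds710_list_asu_element
+             lasue,
+             *iterator = NULL;
+
+           int
+             user_data = 42;
+
+           void
+             *value;
+
+           lfds710_list_asu_init_valid_on_current_logical_core( &lasus, NULL );
+
+           LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( lasue, 5 );
+           LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lasue, &user_data );
+           lfds710_list_asu_insert_at_end( &lasus, &lasue );
+
+           // walk the list from start to end; the macro evaluates to NULL at the end of the list
+           while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, iterator) )
+             value = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *iterator );
+*/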
--- /dev/null
+/***** defines *****/
+#define LFDS710_MISC_VERSION_STRING "7.1.0"
+#define LFDS710_MISC_VERSION_INTEGER 710
+
+#ifndef NULL
+ #define NULL ( (void *) 0 )
+#endif
+
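+// TRD : array indices and size for pointer-and-counter pairs used as DWCAS targets (the counter is the ABA tag)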
+#define POINTER 0
+#define COUNTER 1
+#define PAC_SIZE 2
+
+#define LFDS710_MISC_DELIBERATELY_CRASH { char *c = 0; *c = 0; }
+
+#if( !defined LFDS710_PAL_ATOMIC_ADD )
+ #define LFDS710_PAL_NO_ATOMIC_ADD
+ #define LFDS710_MISC_ATOMIC_SUPPORT_ADD 0
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ LFDS710_PAL_ASSERT( !"LFDS710_PAL_ATOMIC_ADD not implemented for this platform." ); \
+ LFDS710_MISC_DELIBERATELY_CRASH; \
+ }
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_ADD 1
+#endif
+
+#if( !defined LFDS710_PAL_ATOMIC_CAS )
+ #define LFDS710_PAL_NO_ATOMIC_CAS
+ #define LFDS710_MISC_ATOMIC_SUPPORT_CAS 0
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ LFDS710_PAL_ASSERT( !"LFDS710_PAL_ATOMIC_CAS not implemented for this platform." ); \
+ (result) = 0; \
+ LFDS710_MISC_DELIBERATELY_CRASH; \
+ }
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_CAS 1
+#endif
+
+#if( !defined LFDS710_PAL_ATOMIC_DWCAS )
+ #define LFDS710_PAL_NO_ATOMIC_DWCAS
+ #define LFDS710_MISC_ATOMIC_SUPPORT_DWCAS 0
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ LFDS710_PAL_ASSERT( !"LFDS710_PAL_ATOMIC_DWCAS not implemented for this platform." ); \
+ (result) = 0; \
+ LFDS710_MISC_DELIBERATELY_CRASH; \
+ }
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_DWCAS 1
+#endif
+
+#if( !defined LFDS710_PAL_ATOMIC_EXCHANGE )
+ #define LFDS710_PAL_NO_ATOMIC_EXCHANGE
+ #define LFDS710_MISC_ATOMIC_SUPPORT_EXCHANGE 0
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ LFDS710_PAL_ASSERT( !"LFDS710_PAL_ATOMIC_EXCHANGE not implemented for this platform." ); \
+ LFDS710_MISC_DELIBERATELY_CRASH; \
+ }
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_EXCHANGE 1
+#endif
+
+#if( !defined LFDS710_PAL_ATOMIC_SET )
+ #define LFDS710_PAL_NO_ATOMIC_SET
+ #define LFDS710_MISC_ATOMIC_SUPPORT_SET 0
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ LFDS710_PAL_ASSERT( !"LFDS710_PAL_ATOMIC_SET not implemented for this platform." ); \
+ LFDS710_MISC_DELIBERATELY_CRASH; \
+ }
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_SET 1
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_COMPILER_LOAD && defined LFDS710_PAL_BARRIER_PROCESSOR_LOAD )
+ #define LFDS710_MISC_BARRIER_LOAD ( LFDS710_PAL_BARRIER_COMPILER_LOAD, LFDS710_PAL_BARRIER_PROCESSOR_LOAD, LFDS710_PAL_BARRIER_COMPILER_LOAD )
+#endif
+
+#if( (!defined LFDS710_PAL_BARRIER_COMPILER_LOAD || defined LFDS710_PAL_COMPILER_BARRIERS_MISSING_PRESUMED_HAVING_A_GOOD_TIME) && defined LFDS710_PAL_BARRIER_PROCESSOR_LOAD )
+ #define LFDS710_MISC_BARRIER_LOAD LFDS710_PAL_BARRIER_PROCESSOR_LOAD
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_COMPILER_LOAD && !defined LFDS710_PAL_BARRIER_PROCESSOR_LOAD )
+ #define LFDS710_MISC_BARRIER_LOAD LFDS710_PAL_BARRIER_COMPILER_LOAD
+#endif
+
+#if( !defined LFDS710_PAL_BARRIER_COMPILER_LOAD && !defined LFDS710_PAL_BARRIER_PROCESSOR_LOAD )
+ #define LFDS710_MISC_BARRIER_LOAD
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_COMPILER_STORE && defined LFDS710_PAL_BARRIER_PROCESSOR_STORE )
+ #define LFDS710_MISC_BARRIER_STORE ( LFDS710_PAL_BARRIER_COMPILER_STORE, LFDS710_PAL_BARRIER_PROCESSOR_STORE, LFDS710_PAL_BARRIER_COMPILER_STORE )
+#endif
+
+#if( (!defined LFDS710_PAL_BARRIER_COMPILER_STORE || defined LFDS710_PAL_COMPILER_BARRIERS_MISSING_PRESUMED_HAVING_A_GOOD_TIME) && defined LFDS710_PAL_BARRIER_PROCESSOR_STORE )
+ #define LFDS710_MISC_BARRIER_STORE LFDS710_PAL_BARRIER_PROCESSOR_STORE
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_COMPILER_STORE && !defined LFDS710_PAL_BARRIER_PROCESSOR_STORE )
+ #define LFDS710_MISC_BARRIER_STORE LFDS710_PAL_BARRIER_COMPILER_STORE
+#endif
+
+#if( !defined LFDS710_PAL_BARRIER_COMPILER_STORE && !defined LFDS710_PAL_BARRIER_PROCESSOR_STORE )
+ #define LFDS710_MISC_BARRIER_STORE
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_COMPILER_FULL && defined LFDS710_PAL_BARRIER_PROCESSOR_FULL )
+ #define LFDS710_MISC_BARRIER_FULL ( LFDS710_PAL_BARRIER_COMPILER_FULL, LFDS710_PAL_BARRIER_PROCESSOR_FULL, LFDS710_PAL_BARRIER_COMPILER_FULL )
+#endif
+
+#if( (!defined LFDS710_PAL_BARRIER_COMPILER_FULL || defined LFDS710_PAL_COMPILER_BARRIERS_MISSING_PRESUMED_HAVING_A_GOOD_TIME) && defined LFDS710_PAL_BARRIER_PROCESSOR_FULL )
+ #define LFDS710_MISC_BARRIER_FULL LFDS710_PAL_BARRIER_PROCESSOR_FULL
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_COMPILER_FULL && !defined LFDS710_PAL_BARRIER_PROCESSOR_FULL )
+ #define LFDS710_MISC_BARRIER_FULL LFDS710_PAL_BARRIER_COMPILER_FULL
+#endif
+
+#if( !defined LFDS710_PAL_BARRIER_COMPILER_FULL && !defined LFDS710_PAL_BARRIER_PROCESSOR_FULL )
+ #define LFDS710_MISC_BARRIER_FULL
+#endif
+
+#if( (defined LFDS710_PAL_BARRIER_COMPILER_LOAD && defined LFDS710_PAL_BARRIER_COMPILER_STORE && defined LFDS710_PAL_BARRIER_COMPILER_FULL) || (defined LFDS710_PAL_COMPILER_BARRIERS_MISSING_PRESUMED_HAVING_A_GOOD_TIME) )
+ #define LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS 1
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS 0
+#endif
+
+#if( defined LFDS710_PAL_BARRIER_PROCESSOR_LOAD && defined LFDS710_PAL_BARRIER_PROCESSOR_STORE && defined LFDS710_PAL_BARRIER_PROCESSOR_FULL )
+ #define LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS 1
+#else
+ #define LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS 0
+#endif
+
+#define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE LFDS710_MISC_BARRIER_LOAD
+#define LFDS710_MISC_FLUSH { LFDS710_MISC_BARRIER_STORE; lfds710_misc_force_store(); }
+
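+/* TRD : usage sketch for the two defines above; thread one and thread two are illustrative
+         user threads, and lfds710_list_asu is used only as an example structure - a state
+         instance initialized on one logical core becomes safe to use on another logical
+         core once that other core has issued the load barrier macro
+
+           // thread one, on logical core A
+           lfds710_list_asu_init_valid_on_current_logical_core( &lasus, NULL );
+           // ...now make &lasus visible to thread two (e.g. via a message queue)
+
+           // thread two, on logical core B, before first use of lasus
+           LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+           lfds710_list_asu_insert_at_end( &lasus, &lasue );
+*/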
+/***** enums *****/
+enum lfds710_misc_cas_strength
+{
+ // TRD : GCC defined values
+ LFDS710_MISC_CAS_STRENGTH_STRONG = 0,
+ LFDS710_MISC_CAS_STRENGTH_WEAK = 1,
+};
+
+enum lfds710_misc_validity
+{
+ LFDS710_MISC_VALIDITY_UNKNOWN,
+ LFDS710_MISC_VALIDITY_VALID,
+ LFDS710_MISC_VALIDITY_INVALID_LOOP,
+ LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS,
+ LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS,
+ LFDS710_MISC_VALIDITY_INVALID_TEST_DATA,
+ LFDS710_MISC_VALIDITY_INVALID_ORDER,
+ LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED,
+ LFDS710_MISC_VALIDITY_INDETERMINATE_NONATOMIC_PASSED,
+};
+
+enum lfds710_misc_flag
+{
+ LFDS710_MISC_FLAG_LOWERED,
+ LFDS710_MISC_FLAG_RAISED
+};
+
+enum lfds710_misc_query
+{
+ LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING
+};
+
+enum lfds710_misc_data_structure
+{
+ LFDS710_MISC_DATA_STRUCTURE_BTREE_AU,
+ LFDS710_MISC_DATA_STRUCTURE_FREELIST,
+ LFDS710_MISC_DATA_STRUCTURE_HASH_A,
+ LFDS710_MISC_DATA_STRUCTURE_LIST_AOS,
+ LFDS710_MISC_DATA_STRUCTURE_LIST_ASU,
+ LFDS710_MISC_DATA_STRUCTURE_QUEUE_BMM,
+ LFDS710_MISC_DATA_STRUCTURE_QUEUE_BSS,
+ LFDS710_MISC_DATA_STRUCTURE_QUEUE_UMM,
+ LFDS710_MISC_DATA_STRUCTURE_RINGBUFFER,
+ LFDS710_MISC_DATA_STRUCTURE_STACK,
+ LFDS710_MISC_DATA_STRUCTURE_COUNT
+};
+
+/***** struct *****/
+struct lfds710_misc_backoff_state
+{
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ lfds710_pal_uint_t
+ backoff_iteration_frequency_counters[2],
+ metric,
+ total_operations;
+};
+
+struct lfds710_misc_globals
+{
+ struct lfds710_prng_state
+ ps;
+};
+
+struct lfds710_misc_validation_info
+{
+ lfds710_pal_uint_t
+ min_elements,
+ max_elements;
+};
+
+/***** externs *****/
+extern struct lfds710_misc_globals
+ lfds710_misc_globals;
+
+/***** public prototypes *****/
+static LFDS710_PAL_INLINE void lfds710_misc_force_store( void );
+
+void lfds710_misc_query( enum lfds710_misc_query query_type, void *query_input, void *query_output );
+
+/***** public in-line functions *****/
+#pragma prefast( disable : 28112, "blah" )
+
+static LFDS710_PAL_INLINE void lfds710_misc_force_store()
+{
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ destination;
+
+ LFDS710_PAL_ATOMIC_SET( &destination, 0 );
+
+ return;
+}
+
--- /dev/null
+/****************************************************************************/
+#if( defined __GNUC__ )
+ // TRD : makes checking GCC versions much tidier
+ #define LFDS710_PAL_GCC_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ )
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 )
+
+ #ifdef LFDS710_PAL_COMPILER
+ #error More than one porting abstraction layer matches the current platform in lfds710_porting_abstraction_layer_compiler.h
+ #endif
+
+ #define LFDS710_PAL_COMPILER
+
+ #define LFDS710_PAL_COMPILER_STRING "MSVC"
+
+ #define LFDS710_PAL_ALIGN(alignment) __declspec( align(alignment) )
+ #define LFDS710_PAL_INLINE __forceinline
+
+ #define LFDS710_PAL_BARRIER_COMPILER_LOAD _ReadBarrier()
+ #define LFDS710_PAL_BARRIER_COMPILER_STORE _WriteBarrier()
+ #define LFDS710_PAL_BARRIER_COMPILER_FULL _ReadWriteBarrier()
+
+ /* TRD : there are four processors to consider;
+
+ . ARM32 (32 bit, ADD, CAS, DWCAS, EXCHANGE, SET) (defined _M_ARM)
+ . Itanium (64 bit, ADD, CAS, EXCHANGE, SET) (defined _M_IA64)
+ . x64 (64 bit, ADD, CAS, DWCAS, EXCHANGE, SET) (defined _M_X64 || defined _M_AMD64)
+ . x86 (32 bit, ADD, CAS, DWCAS, EXCHANGE, SET) (defined _M_IX86)
+
+ can't find any indications of 64-bit ARM support yet
+
+ ARM has better intrinsics than the others, as there are no-fence variants
+
+ in theory we also have to deal with 32-bit Windows on a 64-bit platform,
+ and I presume we'd see the compiler properly indicate this in its macros,
+ but this would require that we use 32-bit atomics on the 64-bit platforms,
+ while keeping 64-bit cache line lengths and so on, and this is just so
+ weird a thing to do these days that it's not supported
+ */
+
+ #if( defined _M_ARM )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_LOAD __dmb( _ARM_BARRIER_ISH )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_STORE __dmb( _ARM_BARRIER_ISHST )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_FULL __dmb( _ARM_BARRIER_ISH )
+
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (result) = (result_type) _InterlockedAdd_nf( (int long volatile *) (pointer_to_target), (int long) (value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds710_pal_uint_t \
+ original_compare; \
+ \
+ original_compare = (lfds710_pal_uint_t) *(pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds710_pal_uint_t *) (pointer_to_compare) = (lfds710_pal_uint_t) _InterlockedCompareExchange_nf( (long volatile *) (pointer_to_destination), (long) (new_destination), (long) *(pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds710_pal_uint_t) *(pointer_to_compare) ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ __int64 \
+ original_compare; \
+ \
+ original_compare = *(__int64 *) (pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(__int64 *) (pointer_to_compare) = _InterlockedCompareExchange64_nf( (__int64 volatile *) (pointer_to_destination), *(__int64 *) (pointer_to_new_destination), *(__int64 *) (pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ (result) = (char unsigned) ( *(__int64 *) (pointer_to_compare) == original_compare ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (exchange) = (exchange_type) _InterlockedExchange_nf( (int long volatile *) (pointer_to_destination), (int long) (exchange) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (void) _InterlockedExchange_nf( (int long volatile *) (pointer_to_destination), (int long) (new_value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #if( defined _M_IA64 )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_LOAD __mf()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_STORE __mf()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_FULL __mf()
+
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (result) = (result_type) _InterlockedAdd64_acq( (__int64 volatile *) (pointer_to_target), (__int64) (value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds710_pal_uint_t \
+ original_compare; \
+ \
+ original_compare = (lfds710_pal_uint_t) *(pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds710_pal_uint_t *) (pointer_to_compare) = (lfds710_pal_uint_t) _InterlockedCompareExchange64_acq( (__int64 volatile *) (pointer_to_destination), (__int64) (new_destination), (__int64) *(pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds710_pal_uint_t) *(pointer_to_compare) ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (exchange) = (exchange_type) _InterlockedExchange64_acq( (__int64 volatile *) (pointer_to_destination), (__int64) (exchange) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (void) _InterlockedExchange64_acq( (__int64 volatile *) (pointer_to_destination), (__int64) (new_value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #if( defined _M_X64 || defined _M_AMD64 )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_LOAD _mm_lfence()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_STORE _mm_sfence()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_FULL _mm_mfence()
+
+ // TRD : no _InterlockedAdd64 for x64 - only the badly named _InterlockedExchangeAdd64, which is the same as _InterlockedAdd64 but returns the *original* value (which we must then add to before we return)
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (result) = (result_type) _InterlockedExchangeAdd64( (__int64 volatile *) (pointer_to_target), (__int64) (value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ result += value; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds710_pal_uint_t \
+ original_compare; \
+ \
+ original_compare = (lfds710_pal_uint_t) *(pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds710_pal_uint_t *) (pointer_to_compare) = (lfds710_pal_uint_t) _InterlockedCompareExchange64( (__int64 volatile *) (pointer_to_destination), (__int64) (new_destination), (__int64) *(pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds710_pal_uint_t) *(pointer_to_compare) ); \
+ }
+
+ #if( _MSC_VER >= 1500 )
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (result) = (char unsigned) _InterlockedCompareExchange128( (__int64 volatile *) (pointer_to_destination), (__int64) (pointer_to_new_destination[1]), (__int64) (pointer_to_new_destination[0]), (__int64 *) (pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (exchange) = (exchange_type) _InterlockedExchange64( (__int64 volatile *) (pointer_to_destination), (__int64) (exchange) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (void) _InterlockedExchange64( (__int64 volatile *) (pointer_to_destination), (__int64) (new_value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+ #if( defined _M_IX86 )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_LOAD lfds710_misc_force_store()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_STORE lfds710_misc_force_store()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_FULL lfds710_misc_force_store()
+
+ // TRD : no _InterlockedAdd for x86 - only the badly named _InterlockedExchangeAdd, which is the same as _InterlockedAdd but returns the *original* value (which we must then add to before we return)
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (result) = (result_type) _InterlockedExchangeAdd( (int long volatile *) (pointer_to_target), (int long) (value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ result += value; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds710_pal_uint_t \
+ original_compare; \
+ \
+ /* LFDS710_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* LFDS710_PAL_ASSERT( (pointer_to_compare) != NULL ); */ \
+ /* TRD : new_destination can be any value in its range */ \
+ /* TRD : cas_strength can be any value in its range */ \
+ /* TRD : result can be any value in its range */ \
+ \
+ original_compare = (lfds710_pal_uint_t) *(pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(lfds710_pal_uint_t *) (pointer_to_compare) = (lfds710_pal_uint_t) _InterlockedCompareExchange( (long volatile *) (pointer_to_destination), (long) (new_destination), (long) *(pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (char unsigned) ( original_compare == (lfds710_pal_uint_t) *(pointer_to_compare) ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ __int64 \
+ original_compare; \
+ \
+ original_compare = *(__int64 *) (pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(__int64 *) (pointer_to_compare) = _InterlockedCompareExchange64( (__int64 volatile *) (pointer_to_destination), *(__int64 *) (pointer_to_new_destination), *(__int64 *) (pointer_to_compare) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ (result) = (char unsigned) ( *(__int64 *) (pointer_to_compare) == original_compare ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ /* LFDS710_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* TRD : exchange can be any value in its range */ \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (exchange) = (exchange_type) _InterlockedExchange( (int long volatile *) (pointer_to_destination), (int long) (exchange) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (void) _InterlockedExchange( (int long volatile *) (pointer_to_destination), (int long) (new_value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+ #endif
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && LFDS710_PAL_GCC_VERSION >= 412 && LFDS710_PAL_GCC_VERSION < 473 )
+
+ #ifdef LFDS710_PAL_COMPILER
+ #error More than one porting abstraction layer matches the current platform in lfds710_porting_abstraction_layer_compiler.h
+ #endif
+
+ #define LFDS710_PAL_COMPILER
+
+ #define LFDS710_PAL_COMPILER_STRING "GCC < 4.7.3"
+
+ #define LFDS710_PAL_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS710_PAL_INLINE inline
+
+ static LFDS710_PAL_INLINE void lfds710_pal_barrier_compiler( void )
+ {
+ __asm__ __volatile__ ( "" : : : "memory" );
+ }
+
+ #define LFDS710_PAL_BARRIER_COMPILER_LOAD lfds710_pal_barrier_compiler()
+ #define LFDS710_PAL_BARRIER_COMPILER_STORE lfds710_pal_barrier_compiler()
+ #define LFDS710_PAL_BARRIER_COMPILER_FULL lfds710_pal_barrier_compiler()
+
+ #define LFDS710_PAL_BARRIER_PROCESSOR_LOAD __sync_synchronize()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_STORE __sync_synchronize()
+ #define LFDS710_PAL_BARRIER_PROCESSOR_FULL __sync_synchronize()
+
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (result) = (result_type) __sync_add_and_fetch( (lfds710_pal_uint_t *) (pointer_to_target), (lfds710_pal_uint_t) (value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ lfds710_pal_uint_t \
+ original_compare; \
+ \
+ original_compare = (lfds710_pal_uint_t) *(pointer_to_compare); \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ *(pointer_to_compare) = __sync_val_compare_and_swap( pointer_to_destination, *(pointer_to_compare), new_destination ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ result = (unsigned char) ( original_compare == (lfds710_pal_uint_t) *(pointer_to_compare) ); \
+ }
+
+ // TRD : ARM and x86 have DWCAS which we can get via GCC intrinsics
+ #if( defined __arm__ || defined __i686__ || defined __i586__ || defined __i486__ )
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ int long long unsigned \
+ original_destination; \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ original_destination = __sync_val_compare_and_swap( (int long long unsigned volatile *) (pointer_to_destination), *(int long long unsigned *) (pointer_to_compare), *(int long long unsigned *) (pointer_to_new_destination) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ \
+ (result) = (char unsigned) ( original_destination == *(int long long unsigned *) (pointer_to_compare) ); \
+ \
+ *(int long long unsigned *) (pointer_to_compare) = original_destination; \
+ }
+ #endif
+
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ /* LFDS710_PAL_ASSERT( (pointer_to_destination) != NULL ); */ \
+ /* TRD : exchange can be any value in its range */ \
+ /* TRD : exchange_type can be any value in its range */ \
+ \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (exchange) = (exchange_type) __sync_lock_test_and_set( pointer_to_destination, (exchange) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ (void) __sync_lock_test_and_set( pointer_to_destination, (new_value) ); \
+ LFDS710_PAL_BARRIER_COMPILER_FULL; \
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && LFDS710_PAL_GCC_VERSION >= 473 )
+
+ #ifdef LFDS710_PAL_COMPILER
+ #error More than one porting abstraction layer matches the current platform in lfds710_porting_abstraction_layer_compiler.h
+ #endif
+
+ #define LFDS710_PAL_COMPILER
+
+ #define LFDS710_PAL_COMPILER_STRING "GCC >= 4.7.3"
+
+ #define LFDS710_PAL_ALIGN(alignment) __attribute__( (aligned(alignment)) )
+ #define LFDS710_PAL_INLINE inline
+
+ // TRD : GCC >= 4.7.3 compiler barriers are built into the intrinsics
+ #define LFDS710_PAL_COMPILER_BARRIERS_MISSING_PRESUMED_HAVING_A_GOOD_TIME
+
+ #define LFDS710_PAL_BARRIER_PROCESSOR_LOAD __atomic_thread_fence( __ATOMIC_ACQUIRE )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_STORE __atomic_thread_fence( __ATOMIC_RELEASE )
+ #define LFDS710_PAL_BARRIER_PROCESSOR_FULL __atomic_thread_fence( __ATOMIC_ACQ_REL )
+
+ #define LFDS710_PAL_ATOMIC_ADD( pointer_to_target, value, result, result_type ) \
+ { \
+ (result) = (result_type) __atomic_add_fetch( (pointer_to_target), (value), __ATOMIC_RELAXED ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_CAS( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, result ) \
+ { \
+ result = (char unsigned) __atomic_compare_exchange_n( pointer_to_destination, pointer_to_compare, new_destination, cas_strength, __ATOMIC_RELAXED, __ATOMIC_RELAXED ); \
+ }
+
+ // TRD : ARM and x86 have DWCAS which we can get via GCC intrinsics
+ #if( defined __arm__ || defined __i686__ || defined __i586__ || defined __i486__ )
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ (result) = (char unsigned) __atomic_compare_exchange_n( (int long long unsigned volatile *) (pointer_to_destination), (int long long unsigned *) (pointer_to_compare), *(int long long unsigned *) (pointer_to_new_destination), (cas_strength), __ATOMIC_RELAXED, __ATOMIC_RELAXED ); \
+ }
+ #endif
+
+ #if( defined __x86_64__ )
+ /* TRD : on 64-bit platforms the pointer-and-counter pair is 128 bits wide, and on the
+          integer types used here __sync_val_compare_and_swap() only emits the 64-bit cmpxchg,
+          so we must emit cmpxchg16b ourselves via inline assembly
+ */
+
+ // TRD : lfds710_pal_uint_t volatile (*destination)[2], lfds710_pal_uint_t (*compare)[2], lfds710_pal_uint_t (*new_destination)[2], enum lfds710_misc_cas_strength cas_strength, char unsigned result
+
+ #define LFDS710_PAL_ATOMIC_DWCAS( pointer_to_destination, pointer_to_compare, pointer_to_new_destination, cas_strength, result ) \
+ { \
+ (result) = 0; \
+ \
+ __asm__ __volatile__ \
+ ( \
+ "lock;" /* make cmpxchg16b atomic */ \
+ "cmpxchg16b %0;" /* cmpxchg16b sets ZF on success */ \
+ "setz %4;" /* if ZF set, set result to 1 */ \
+ \
+ /* output */ \
+ : "+m" ((pointer_to_destination)[0]), "+m" ((pointer_to_destination)[1]), "+a" ((pointer_to_compare)[0]), "+d" ((pointer_to_compare)[1]), "=q" (result) \
+ \
+ /* input */ \
+ : "b" ((pointer_to_new_destination)[0]), "c" ((pointer_to_new_destination)[1]) \
+ \
+ /* clobbered */ \
+ : \
+ ); \
+ }
+ #endif
+
+ #define LFDS710_PAL_ATOMIC_EXCHANGE( pointer_to_destination, exchange, exchange_type ) \
+ { \
+ (exchange) = (exchange_type) __atomic_exchange_n( (pointer_to_destination), (exchange), __ATOMIC_RELAXED ); \
+ }
+
+ #define LFDS710_PAL_ATOMIC_SET( pointer_to_destination, new_value ) \
+ { \
+ (void) __atomic_exchange_n( (pointer_to_destination), (new_value), __ATOMIC_RELAXED ); \
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LFDS710_PAL_COMPILER )
+
+ #error No matching porting abstraction layer in lfds710_porting_abstraction_layer_compiler.h
+
+#endif
+
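+/* TRD : usage sketch showing the calling convention shared by the LFDS710_PAL_ATOMIC_CAS
+         definitions above; the increment loop is illustrative user-style code, not taken
+         from the library sources - on failure the macros write the observed value back
+         into *pointer_to_compare, so the loop does not need to re-read the destination
+
+           lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+             shared_counter = 0;
+
+           lfds710_pal_uint_t
+             compare,
+             new_value;
+
+           char unsigned
+             result;
+
+           compare = shared_counter;
+
+           do
+           {
+             new_value = compare + 1;
+             LFDS710_PAL_ATOMIC_CAS( &shared_counter, &compare, new_value, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+           }
+           while( result == 0 );
+*/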
--- /dev/null
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE )
+
+ #ifdef LFDS710_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LFDS710_PAL_OPERATING_SYSTEM
+
+ #include <assert.h>
+
+ #define LFDS710_PAL_OS_STRING "Windows"
+ #define LFDS710_PAL_ASSERT( expression ) if( !(expression) ) LFDS710_MISC_DELIBERATELY_CRASH;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE )
+
+ #ifdef LFDS710_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LFDS710_PAL_OPERATING_SYSTEM
+
+ #include <assert.h>
+ #include <wdm.h>
+
+ #define LFDS710_PAL_OS_STRING "Windows"
+ #define LFDS710_PAL_ASSERT( expression ) if( !(expression) ) LFDS710_MISC_DELIBERATELY_CRASH;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE )
+
+ #ifdef LFDS710_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LFDS710_PAL_OPERATING_SYSTEM
+
+ #define LFDS710_PAL_OS_STRING "Linux"
+ #define LFDS710_PAL_ASSERT( expression ) if( !(expression) ) LFDS710_MISC_DELIBERATELY_CRASH;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LFDS710_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LFDS710_PAL_OPERATING_SYSTEM
+
+ #include <linux/module.h>
+
+ #define LFDS710_PAL_OS_STRING "Linux"
+ #define LFDS710_PAL_ASSERT( expression ) BUG_ON( !(expression) ) // TRD : BUG_ON fires when its argument is true, so the assert condition must be inverted
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LFDS710_PAL_OPERATING_SYSTEM )
+
+ #error No matching porting abstraction layer in lfds710_porting_abstraction_layer_operating_system.h
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _MSC_VER && defined _M_IX86 )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "x86"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && (defined _M_X64 || defined _M_AMD64) )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "x64"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ // TRD : Intel processors bring in two adjacent cache lines at once (adjacent cache line prefetch) unless this is disabled in the BIOS
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 128
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && defined _M_IA64 )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "IA64"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER && defined _M_ARM )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "ARM (32-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ /* TRD : ARM is LL/SC and uses a reservation granule of 8 to 2048 bytes
+ so the isolation value used here is worst-case - be sure to set
+ this correctly, otherwise structures are painfully large
+
+ the test application has an argument, "-e", which attempts to
+ determine the ERG length
+ */
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 2048
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __arm__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "ARM (32-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ /* TRD : ARM is LL/SC and uses a reservation granule of 8 to 2048 bytes
+ so the isolation value used here is worst-case - be sure to set
+ this correctly, otherwise structures are painfully large
+
+ the test application has an argument, "-e", which attempts to
+ determine the ERG length
+ */
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 2048
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __aarch64__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "ARM (64-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ /* TRD : ARM is LL/SC and uses a reservation granule of 8 to 2048 bytes
+ so the isolation value used here is worst-case - be sure to set
+ this correctly, otherwise structures are painfully large
+
+ the test application has an argument, "-e", which attempts to
+ determine the ERG length
+ */
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 2048
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && (defined __i686__ || defined __i586__ || defined __i486__) )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "x86"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __x86_64__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "x64"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 128
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __alpha__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "alpha"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __ia64__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "IA64"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __mips__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "MIPS (32-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __mips64 )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "MIPS (64-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __ppc__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "POWERPC (32-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ // TRD : this value is uncertain
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 128
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __ppc64__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "POWERPC (64-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ // TRD : this value is uncertain
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 128
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __sparc__ && !defined __sparc_v9__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "SPARC (32-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __sparc__ && defined __sparc_v9__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long long lfds710_pal_int_t;
+ typedef int long long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "SPARC (64-bit)"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 8
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 16
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 64
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __GNUC__ && defined __m68k__ )
+
+ #ifdef LFDS710_PAL_PROCESSOR
+ #error More than one porting abstraction layer matches the current platform in "lfds710_porting_abstraction_layer_processor.h".
+ #endif
+
+ #define LFDS710_PAL_PROCESSOR
+
+ typedef int long lfds710_pal_int_t;
+ typedef int long unsigned lfds710_pal_uint_t;
+
+ #define LFDS710_PAL_PROCESSOR_STRING "680x0"
+
+ #define LFDS710_PAL_ALIGN_SINGLE_POINTER 4
+ #define LFDS710_PAL_ALIGN_DOUBLE_POINTER 8
+
+ #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES 32
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LFDS710_PAL_PROCESSOR )
+
+ #error No matching porting abstraction layer in "lfds710_porting_abstraction_layer_processor.h".
+
+#endif
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_PRNG_MAX ( (lfds710_pal_uint_t) -1 )
+
+/* TRD : the seed is from an on-line hardware RNG, using atmospheric noise
+ the URL below will generate another 16 random hex digits (i.e. a 64-bit number) and is
+ the RNG used to generate the 64-bit seed below (0x0a34655d34c092fe)
+
+ http://www.random.org/integers/?num=16&min=0&max=15&col=1&base=16&format=plain&rnd=new
+
+ the 32-bit seed is the upper half of the 64-bit seed
+
+ the "SplitMix" PRNG is from Sebastiano Vigna's site, CC0 license, http://xorshift.di.unimi.it/splitmix64.c
+ the 64-bit constants come directly from the source; the 32-bit constants are in fact the 32-bit murmurhash3 constants
+*/
+
+#if( LFDS710_PAL_ALIGN_SINGLE_POINTER == 4 )
+ #define LFDS710_PRNG_SEED 0x0a34655dUL
+ #define LFDS710_PRNG_SPLITMIX_MAGIC_RATIO 0x9E3779B9UL
+ #define LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_ONE 16
+ #define LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_TWO 13
+ #define LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_THREE 16
+ #define LFDS710_PRNG_SPLITMIX_MULTIPLY_CONSTANT_ONE 0x85ebca6bUL
+ #define LFDS710_PRNG_SPLITMIX_MULTIPLY_CONSTANT_TWO 0xc2b2ae35UL
+#endif
+
+#if( LFDS710_PAL_ALIGN_SINGLE_POINTER == 8 )
+ #define LFDS710_PRNG_SEED 0x0a34655d34c092feULL
+ #define LFDS710_PRNG_SPLITMIX_MAGIC_RATIO 0x9E3779B97F4A7C15ULL
+ #define LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_ONE 30
+ #define LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_TWO 27
+ #define LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_THREE 31
+ #define LFDS710_PRNG_SPLITMIX_MULTIPLY_CONSTANT_ONE 0xBF58476D1CE4E5B9ULL
+ #define LFDS710_PRNG_SPLITMIX_MULTIPLY_CONSTANT_TWO 0x94D049BB133111EBULL
+#endif
+
+// TRD : struct lfds710_prng_state prng_state, lfds710_pal_uint_t random_value
+#define LFDS710_PRNG_GENERATE( prng_state, random_value ) \
+{ \
+ LFDS710_PAL_ATOMIC_ADD( &(prng_state).entropy, LFDS710_PRNG_SPLITMIX_MAGIC_RATIO, (random_value), lfds710_pal_uint_t ); \
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value ); \
+}
+
+// TRD : struct lfds710_prng_st_state prng_st_state, lfds710_pal_uint_t random_value
+#define LFDS710_PRNG_ST_GENERATE( prng_st_state, random_value ) \
+{ \
+ (random_value) = ( (prng_st_state).entropy += LFDS710_PRNG_SPLITMIX_MAGIC_RATIO ); \
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value ); \
+}
+
+// TRD : lfds710_pal_uint_t random_value
+#define LFDS710_PRNG_ST_MIXING_FUNCTION( random_value ) \
+{ \
+ (random_value) = ((random_value) ^ ((random_value) >> LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_ONE)) * LFDS710_PRNG_SPLITMIX_MULTIPLY_CONSTANT_ONE; \
+ (random_value) = ((random_value) ^ ((random_value) >> LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_TWO)) * LFDS710_PRNG_SPLITMIX_MULTIPLY_CONSTANT_TWO; \
+ (random_value) = ((random_value) ^ ((random_value) >> LFDS710_PRNG_SPLITMIX_SHIFT_CONSTANT_THREE)); \
+}
+
+/***** structs *****/
+struct lfds710_prng_state
+{
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ entropy;
+};
+
+struct lfds710_prng_st_state
+{
+ lfds710_pal_uint_t
+ entropy;
+};
+
+/***** public prototypes *****/
+void lfds710_prng_init_valid_on_current_logical_core( struct lfds710_prng_state *ps, lfds710_pal_uint_t seed );
+void lfds710_prng_st_init( struct lfds710_prng_st_state *psts, lfds710_pal_uint_t seed );
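+
+/* usage sketch - illustrative only, not part of the library; shows the
+   single-threaded PRNG variant being seeded with the library-provided seed
+   and generating one value (all variable names here are arbitrary)
+
+     {
+       struct lfds710_prng_st_state
+         psts;
+
+       lfds710_pal_uint_t
+         random_value;
+
+       lfds710_prng_st_init( &psts, LFDS710_PRNG_SEED );
+
+       LFDS710_PRNG_ST_GENERATE( psts, random_value );
+     }
+*/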
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_QUEUE_BMM_GET_USER_STATE_FROM_STATE( queue_bmm_state ) ( (queue_bmm_state).user_state )
+
+/***** enums *****/
+enum lfds710_queue_bmm_query
+{
+ LFDS710_QUEUE_BMM_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS710_QUEUE_BMM_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_queue_bmm_element
+{
+ lfds710_pal_uint_t volatile
+ sequence_number;
+
+ void
+ *volatile key,
+ *volatile value;
+};
+
+struct lfds710_queue_bmm_state
+{
+ lfds710_pal_uint_t
+ number_elements,
+ mask;
+
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ read_index,
+ write_index;
+
+ struct lfds710_queue_bmm_element
+ *element_array;
+
+ void
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ dequeue_backoff,
+ enqueue_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_queue_bmm_init_valid_on_current_logical_core( struct lfds710_queue_bmm_state *qbmms,
+ struct lfds710_queue_bmm_element *element_array,
+ lfds710_pal_uint_t number_elements,
+ void *user_state );
+
+void lfds710_queue_bmm_cleanup( struct lfds710_queue_bmm_state *qbmms,
+ void (*element_cleanup_callback)(struct lfds710_queue_bmm_state *qbmms,
+ void *key,
+ void *value) );
+
+int lfds710_queue_bmm_enqueue( struct lfds710_queue_bmm_state *qbmms,
+ void *key,
+ void *value );
+
+int lfds710_queue_bmm_dequeue( struct lfds710_queue_bmm_state *qbmms,
+ void **key,
+ void **value );
+
+void lfds710_queue_bmm_query( struct lfds710_queue_bmm_state *qbmms,
+ enum lfds710_queue_bmm_query query_type,
+ void *query_input,
+ void *query_output );
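+
+/* usage sketch - illustrative only, not part of the library; the caller
+   supplies the element array; a power-of-two element count is used here,
+   on the assumption (suggested by the mask field) that one is required;
+   all names are arbitrary
+
+     {
+       struct lfds710_queue_bmm_state
+         qbmms;
+
+       struct lfds710_queue_bmm_element
+         qbmme[8];
+
+       void
+         *key,
+         *value;
+
+       lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, qbmme, 8, NULL );
+
+       if( lfds710_queue_bmm_enqueue(&qbmms, NULL, (void *) (lfds710_pal_uint_t) 1) )
+         ;  // enqueued
+
+       if( lfds710_queue_bmm_dequeue(&qbmms, &key, &value) )
+         ;  // value now holds the enqueued value
+     }
+*/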
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_QUEUE_BSS_GET_USER_STATE_FROM_STATE( queue_bss_state ) ( (queue_bss_state).user_state )
+
+/***** enums *****/
+enum lfds710_queue_bss_query
+{
+ LFDS710_QUEUE_BSS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT,
+ LFDS710_QUEUE_BSS_QUERY_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_queue_bss_element
+{
+ void
+ *volatile key,
+ *volatile value;
+};
+
+struct lfds710_queue_bss_state
+{
+ lfds710_pal_uint_t
+ number_elements,
+ mask;
+
+ lfds710_pal_uint_t volatile
+ read_index,
+ write_index;
+
+ struct lfds710_queue_bss_element
+ *element_array;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds710_queue_bss_init_valid_on_current_logical_core( struct lfds710_queue_bss_state *qbsss,
+ struct lfds710_queue_bss_element *element_array,
+ lfds710_pal_uint_t number_elements,
+ void *user_state );
+ // TRD : number_elements must be a positive integer power of 2
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_queue_bss_cleanup( struct lfds710_queue_bss_state *qbsss,
+ void (*element_cleanup_callback)(struct lfds710_queue_bss_state *qbsss, void *key, void *value) );
+
+int lfds710_queue_bss_enqueue( struct lfds710_queue_bss_state *qbsss,
+ void *key,
+ void *value );
+
+int lfds710_queue_bss_dequeue( struct lfds710_queue_bss_state *qbsss,
+ void **key,
+ void **value );
+
+void lfds710_queue_bss_query( struct lfds710_queue_bss_state *qbsss,
+ enum lfds710_queue_bss_query query_type,
+ void *query_input,
+ void *query_output );
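+
+/* usage sketch - illustrative only, not part of the library; as noted above,
+   number_elements must be a positive integer power of 2; all names are arbitrary
+
+     {
+       struct lfds710_queue_bss_state
+         qbsss;
+
+       struct lfds710_queue_bss_element
+         qbsse[4];
+
+       void
+         *key,
+         *value;
+
+       lfds710_queue_bss_init_valid_on_current_logical_core( &qbsss, qbsse, 4, NULL );
+
+       if( lfds710_queue_bss_enqueue(&qbsss, NULL, (void *) (lfds710_pal_uint_t) 1) )
+         ;  // enqueued
+
+       if( lfds710_queue_bss_dequeue(&qbsss, &key, &value) )
+         ;  // value now holds the enqueued value
+     }
+*/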
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_QUEUE_UMM_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LFDS710_QUEUE_UMM_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LFDS710_QUEUE_UMM_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** enums *****/
+enum lfds710_queue_umm_query
+{
+ LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_queue_umm_element
+{
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *volatile next[PAC_SIZE];
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds710_queue_umm_state
+{
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile enqueue[PAC_SIZE],
+ *volatile dequeue[PAC_SIZE];
+
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ aba_counter;
+
+ void LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ dequeue_backoff,
+ enqueue_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_queue_umm_init_valid_on_current_logical_core( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element *qumme_dummy,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_queue_umm_cleanup( struct lfds710_queue_umm_state *qumms,
+ void (*element_cleanup_callback)(struct lfds710_queue_umm_state *qumms, struct lfds710_queue_umm_element *qumme, enum lfds710_misc_flag dummy_element_flag) );
+
+void lfds710_queue_umm_enqueue( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element *qumme );
+
+int lfds710_queue_umm_dequeue( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element **qumme );
+
+void lfds710_queue_umm_query( struct lfds710_queue_umm_state *qumms,
+ enum lfds710_queue_umm_query query_type,
+ void *query_input,
+ void *query_output );
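+
+/* usage sketch - illustrative only, not part of the library; the queue takes
+   a dummy element at init and user-allocated elements for enqueue; elements
+   are on the stack here purely for brevity - in real use their lifetimes must
+   exceed their time in the queue; all names are arbitrary
+
+     {
+       struct lfds710_queue_umm_state
+         qumms;
+
+       struct lfds710_queue_umm_element
+         qumme_dummy,
+         qumme,
+         *qumme_out;
+
+       lfds710_queue_umm_init_valid_on_current_logical_core( &qumms, &qumme_dummy, NULL );
+
+       LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( qumme, 1 );
+       lfds710_queue_umm_enqueue( &qumms, &qumme );
+
+       if( lfds710_queue_umm_dequeue(&qumms, &qumme_out) )
+         ;  // LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qumme_out ) yields the value
+     }
+*/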
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_RINGBUFFER_GET_USER_STATE_FROM_STATE( ringbuffer_state ) ( (ringbuffer_state).user_state )
+
+/***** enums *****/
+enum lfds710_ringbuffer_query
+{
+ LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_ringbuffer_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ struct lfds710_queue_umm_element
+ qumme;
+
+ struct lfds710_queue_umm_element
+ *qumme_use; // TRD : hack; we need a new queue with no dummy element
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds710_ringbuffer_state
+{
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_queue_umm_state
+ qumms;
+
+ void
+ (*element_cleanup_callback)( struct lfds710_ringbuffer_state *rs, void *key, void *value, enum lfds710_misc_flag unread_flag ),
+ *user_state;
+};
+
+/***** public prototypes *****/
+void lfds710_ringbuffer_init_valid_on_current_logical_core( struct lfds710_ringbuffer_state *rs,
+ struct lfds710_ringbuffer_element *re_array_inc_dummy,
+ lfds710_pal_uint_t number_elements_inc_dummy,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_ringbuffer_cleanup( struct lfds710_ringbuffer_state *rs,
+ void (*element_cleanup_callback)(struct lfds710_ringbuffer_state *rs, void *key, void *value, enum lfds710_misc_flag unread_flag) );
+
+int lfds710_ringbuffer_read( struct lfds710_ringbuffer_state *rs,
+ void **key,
+ void **value );
+
+void lfds710_ringbuffer_write( struct lfds710_ringbuffer_state *rs,
+ void *key,
+ void *value,
+ enum lfds710_misc_flag *overwrite_occurred_flag,
+ void **overwritten_key,
+ void **overwritten_value );
+
+void lfds710_ringbuffer_query( struct lfds710_ringbuffer_state *rs,
+ enum lfds710_ringbuffer_query query_type,
+ void *query_input,
+ void *query_output );
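+
+/* usage sketch - illustrative only, not part of the library; as the parameter
+   names indicate, the element array and its count both include the dummy
+   element; all names are arbitrary
+
+     {
+       struct lfds710_ringbuffer_state
+         rs;
+
+       struct lfds710_ringbuffer_element
+         re[8+1];
+
+       enum lfds710_misc_flag
+         overwrite_occurred_flag;
+
+       void
+         *key,
+         *value,
+         *overwritten_key,
+         *overwritten_value;
+
+       lfds710_ringbuffer_init_valid_on_current_logical_core( &rs, re, 8+1, NULL );
+
+       lfds710_ringbuffer_write( &rs, NULL, (void *) (lfds710_pal_uint_t) 1,
+                                 &overwrite_occurred_flag, &overwritten_key, &overwritten_value );
+
+       if( lfds710_ringbuffer_read(&rs, &key, &value) )
+         ;  // value now holds the written value
+     }
+*/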
+
--- /dev/null
+/***** defines *****/
+#define LFDS710_STACK_GET_KEY_FROM_ELEMENT( stack_element ) ( (stack_element).key )
+#define LFDS710_STACK_SET_KEY_IN_ELEMENT( stack_element, new_key ) ( (stack_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LFDS710_STACK_GET_VALUE_FROM_ELEMENT( stack_element ) ( (stack_element).value )
+#define LFDS710_STACK_SET_VALUE_IN_ELEMENT( stack_element, new_value ) ( (stack_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LFDS710_STACK_GET_USER_STATE_FROM_STATE( stack_state ) ( (stack_state).user_state )
+
+/***** enums *****/
+enum lfds710_stack_query
+{
+ LFDS710_STACK_QUERY_SINGLETHREADED_GET_COUNT,
+ LFDS710_STACK_QUERY_SINGLETHREADED_VALIDATE
+};
+
+/***** structures *****/
+struct lfds710_stack_element
+{
+ struct lfds710_stack_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct lfds710_stack_state
+{
+ struct lfds710_stack_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *volatile top[PAC_SIZE];
+
+ void LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *user_state;
+
+ struct lfds710_misc_backoff_state
+ pop_backoff,
+ push_backoff;
+};
+
+/***** public prototypes *****/
+void lfds710_stack_init_valid_on_current_logical_core( struct lfds710_stack_state *ss,
+ void *user_state );
+ // TRD : used in conjunction with the #define LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE
+
+void lfds710_stack_cleanup( struct lfds710_stack_state *ss,
+ void (*element_cleanup_callback)(struct lfds710_stack_state *ss, struct lfds710_stack_element *se) );
+
+void lfds710_stack_push( struct lfds710_stack_state *ss,
+ struct lfds710_stack_element *se );
+
+int lfds710_stack_pop( struct lfds710_stack_state *ss,
+ struct lfds710_stack_element **se );
+
+void lfds710_stack_query( struct lfds710_stack_state *ss,
+ enum lfds710_stack_query query_type,
+ void *query_input,
+ void *query_output );
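+
+/* usage sketch - illustrative only, not part of the library; elements are
+   user-allocated and are on the (thread) stack here purely for brevity -
+   in real use their lifetimes must exceed their time in the lock-free stack;
+   all names are arbitrary
+
+     {
+       struct lfds710_stack_state
+         ss;
+
+       struct lfds710_stack_element
+         se,
+         *se_out;
+
+       lfds710_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+       LFDS710_STACK_SET_VALUE_IN_ELEMENT( se, 1 );
+       lfds710_stack_push( &ss, &se );
+
+       if( lfds710_stack_pop(&ss, &se_out) )
+         ;  // LFDS710_STACK_GET_VALUE_FROM_ELEMENT( *se_out ) yields the value
+     }
+*/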
+
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_btree_addonly_unbalanced_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_btree_au_cleanup( struct lfds710_btree_au_state *baus,
+ void (*element_cleanup_callback)(struct lfds710_btree_au_state *baus, struct lfds710_btree_au_element *baue) )
+{
+ enum lfds710_btree_au_delete_action
+ delete_action = LFDS710_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct lfds710_btree_au_element
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ /* TRD : we're not lock-free now, so delete at will
+ but be iterative, so the code can be used in kernels (where there's little stack)
+ and be performant, since the user may be
+ creating/destroying many of these trees
+ also remember the user may be deallocating user data
+ so we cannot visit an element twice
+
+ we start at the root and iterate till we go to NULL
+ if the element has zero children, we delete it and move up to its parent
+ if the element has one child, we delete it, move its child into its place, and continue from its child
+ if the element has two children, we move left
+
+ the purpose of this is to minimize walking around the tree
+ to prevent visiting an element twice
+ while also minimizing code complexity
+ */
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_btree_au_get_by_absolute_position( baus, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LFDS710_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LFDS710_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LFDS710_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LFDS710_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LFDS710_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ lfds710_btree_au_get_by_relative_position( &baue, LFDS710_BTREE_AU_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LFDS710_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ lfds710_btree_au_get_by_relative_position( &baue, LFDS710_BTREE_AU_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LFDS710_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ lfds710_btree_au_get_by_relative_position( &baue, LFDS710_BTREE_AU_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LFDS710_BTREE_AU_DELETE_MOVE_LEFT:
+ lfds710_btree_au_get_by_relative_position( &baue, LFDS710_BTREE_AU_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_btree_addonly_unbalanced_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct lfds710_btree_au_element **baue );
+static void lfds710_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct lfds710_btree_au_element **baue );
+
+
+
+
+
+/****************************************************************************/
+int lfds710_btree_au_get_by_key( struct lfds710_btree_au_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct lfds710_btree_au_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ *baue = baus->root;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ {
+ *baue = (*baue)->left;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+
+ if( compare_result > 0 )
+ {
+ *baue = (*baue)->right;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds710_btree_au_get_by_absolute_position( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element **baue,
+ enum lfds710_btree_au_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ *baue = baus->root;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( absolute_position )
+ {
+ case LFDS710_BTREE_AU_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ {
+ *baue = (*baue)->right;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ break;
+
+ case LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ {
+ *baue = (*baue)->left;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ break;
+ }
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds710_btree_au_get_by_relative_position( struct lfds710_btree_au_element **baue,
+ enum lfds710_btree_au_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( relative_position )
+ {
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ // TRD : no load barrier - up already existed, so is known to be safely propagated
+ break;
+
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ LFDS710_MISC_BARRIER_LOAD;
+ break;
+
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ LFDS710_MISC_BARRIER_LOAD;
+ break;
+
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ while( (*baue)->right != NULL )
+ {
+ *baue = (*baue)->right;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ while( (*baue)->left != NULL )
+ {
+ *baue = (*baue)->left;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ lfds710_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baue );
+ break;
+
+ case LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ lfds710_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baue );
+ break;
+ }
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct lfds710_btree_au_element **baue )
+{
+ enum lfds710_btree_au_move
+ action = LFDS710_BTREE_AU_MOVE_INVALID;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED,
+ load_finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ struct lfds710_btree_au_element
+ *left = NULL,
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is;
+ 1. if we have a left, it's the largest element on the right branch of our left child
+ 2. if we don't have a left, and we're on the right of our parent, then it's our parent
+ 3. if we don't have a left, and we're on the left of our parent or we have no parent,
+ iterate up the tree until we find the first child who is on the right of its parent; then it's the parent
+ */
+
+ /* TRD : we need to ensure the variables we use to decide our action are self-consistent
+ to do this, we make local copies of them all
+ then, if they are all not NULL, we can know they cannot change and we can continue
+ if however any of them are NULL, they could have changed while we were reading
+ and so our variables could be non-self-consistent
+ to check for this, we issue another processor read barrier
+ and then compare our local variables with the values in the tree
+ if they all match, then we know our variable set is self-consistent
+ (even though it may now be wrong - but we will discover this when we try the atomic operation)
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ while( load_finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ left = (*baue)->left;
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ // TRD : optimization - if all already not NULL, given we're add-only, they won't change
+ if( left != NULL and right != NULL and (up == NULL or (up != NULL and up_left != NULL and up_right != NULL)) )
+ break;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( left == (*baue)->left and right == (*baue)->right and (up == NULL or (up != NULL and up == (*baue)->up and up_left == (*baue)->up->left and up_right == (*baue)->up->right)) )
+ load_finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( left != NULL )
+ action = LFDS710_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LFDS710_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LFDS710_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LFDS710_BTREE_AU_MOVE_INVALID:
+ case LFDS710_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LFDS710_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ while( (*baue)->right != NULL )
+ {
+ *baue = (*baue)->right;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS710_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LFDS710_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ load_finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ while( load_finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ // TRD : optimization - if all already not NULL, given we're add-only, they won't change
+ if( up == NULL or (up != NULL and up_left != NULL) )
+ break;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( up == (*baue)->up and up_left == (*baue)->up->left )
+ load_finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ *baue = up;
+
+ /*
+
+ while( *baue != NULL and (*baue)->up != NULL and *baue == (*baue)->up->left )
+ *baue = (*baue)->up;
+
+ *baue = (*baue)->up;
+
+ */
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct lfds710_btree_au_element **baue )
+{
+ enum lfds710_btree_au_move
+ action = LFDS710_BTREE_AU_MOVE_INVALID;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED,
+ load_finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ struct lfds710_btree_au_element
+ *left = NULL,
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next largest element is;
+ 1. if we have a right, it's the smallest element on the left branch of our right child
+ 2. if we don't have a right, and we're on the left of our parent, then it's our parent
+ 3. if we don't have a right, and we're on the right of our parent or we have no parent,
+ iterate up the tree until we find the first child who is on the left of its parent; then it's the parent
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ while( load_finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ left = (*baue)->left;
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ // TRD : optimization - if all already not NULL, given we're add-only, they won't change
+ if( left != NULL and right != NULL and (up == NULL or (up != NULL and up_left != NULL and up_right != NULL)) )
+ break;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( left == (*baue)->left and right == (*baue)->right and (up == NULL or (up != NULL and up == (*baue)->up and up_left == (*baue)->up->left and up_right == (*baue)->up->right)) )
+ load_finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( right != NULL )
+ action = LFDS710_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LFDS710_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LFDS710_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LFDS710_BTREE_AU_MOVE_INVALID:
+ case LFDS710_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LFDS710_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ while( (*baue)->left != NULL )
+ {
+ *baue = (*baue)->left;
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+ break;
+
+ case LFDS710_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LFDS710_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ load_finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ while( load_finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ // TRD : optimization - if all already not NULL, given we're add-only, they won't change
+ if( up == NULL or (up != NULL and up_right != NULL) )
+ break;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( up == (*baue)->up and up_right == (*baue)->up->right )
+ load_finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ *baue = up;
+
+ /*
+
+ while( *baue != NULL and (*baue)->up != NULL and *baue == (*baue)->up->right )
+ *baue = (*baue)->up;
+
+ *baue = (*baue)->up;
+
+ */
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element **baue,
+ enum lfds710_btree_au_absolute_position absolute_position,
+ enum lfds710_btree_au_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = lfds710_btree_au_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = lfds710_btree_au_get_by_relative_position( baue, relative_position );
+
+ return rv;
+}
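+
+/* usage sketch - illustrative only; the intended idiom, as used by the query
+   and validate code for this data structure, is to set the element pointer to
+   NULL and then call this function in a loop, giving an in-order walk from
+   smallest to largest; baus is assumed to be a pointer to an initialized tree
+
+     {
+       struct lfds710_btree_au_element
+         *baue = NULL;
+
+       while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(baus, &baue,
+                LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+                LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+         ;  // visit baue
+     }
+*/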
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_btree_addonly_unbalanced_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_btree_au_init_valid_on_current_logical_core( struct lfds710_btree_au_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds710_btree_au_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &baus->root % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ lfds710_misc_internal_backoff_init( &baus->insert_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_btree_addonly_unbalanced_internal.h"
+
+
+
+
+
+/****************************************************************************/
+enum lfds710_btree_au_insert_result lfds710_btree_au_insert( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element *baue,
+ struct lfds710_btree_au_element **existing_baue )
+{
+ char unsigned
+ result = 0;
+
+ int
+ compare_result = 0;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_btree_au_element
+ *compare = NULL,
+ *volatile baue_next = NULL,
+ *volatile baue_parent = NULL,
+ *volatile baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &baue->left % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &baue->right % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &baue->up % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &baue->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ // TRD : existing_baue can be NULL
+
+ /* TRD : we follow a normal search for the insert node and which side to insert
+
+ the difference is that insertion may fail because someone else inserts
+ there before we do
+
+ in this case, we resume searching for the insert node from the node
+ we were attempting to insert upon
+
+ (if we attempted to insert the root node and this failed, i.e. we thought
+ the tree was empty but then it wasn't, then we start searching from the
+ new root)
+ */
+
+ baue->up = baue->left = baue->right = NULL;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ baue_temp = baus->root;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ while( result == 0 )
+ {
+ // TRD : first we find where to insert
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LFDS710_BTREE_AU_EXISTING_KEY_OVERWRITE:
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( *baue_temp, baue->value );
+ return LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LFDS710_BTREE_AU_EXISTING_KEY_FAIL:
+ return LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ if( baue_temp != NULL )
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+
+ /* TRD : second, we actually insert
+
+ at this point baue_temp has come to NULL
+ and baue_parent is the element to insert at
+ and result of the last compare indicates
+ the direction of insertion
+
+ it may be that another thread has already inserted an element with
+ the same key as ourselves, or other elements which mean our position
+ is now wrong
+
+ in this case, it is either inserted in the position we're trying
+ to insert in now, in which case our insert will fail
+
+ or, similarly, other elements will have come in where we are,
+ and our insert will fail
+ */
+
+ if( baue_parent == NULL )
+ {
+ compare = NULL;
+ baue->up = baus->root;
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &baus->root, &compare, baue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ baue_temp = baus->root;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ compare = NULL;
+ baue->up = baue_parent;
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &baue_parent->left, &compare, baue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ }
+
+ if( compare_result > 0 )
+ {
+ compare = NULL;
+ baue->up = baue_parent;
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &baue_parent->right, &compare, baue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ }
+
+ // TRD : if the insert fails, then resume searching at the insert node
+ if( result == 0 )
+ baue_temp = baue_parent;
+ }
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( baus->insert_backoff, backoff_iteration );
+ }
+
+ LFDS710_BACKOFF_AUTOTUNE( baus->insert_backoff, backoff_iteration );
+
+ // TRD : if we get to here, we added a new element (rather than failing or overwriting on an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS;
+}
+
--- /dev/null
+/***** the library-wide header file *****/
+#include "../liblfds710_internal.h"
+
+/***** enums *****/
+enum lfds710_btree_au_move
+{
+ LFDS710_BTREE_AU_MOVE_INVALID,
+ LFDS710_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD,
+ LFDS710_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD,
+ LFDS710_BTREE_AU_MOVE_GET_PARENT,
+ LFDS710_BTREE_AU_MOVE_MOVE_UP_TREE
+};
+
+enum lfds710_btree_au_delete_action
+{
+ LFDS710_BTREE_AU_DELETE_SELF,
+ LFDS710_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD,
+ LFDS710_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD,
+ LFDS710_BTREE_AU_DELETE_MOVE_LEFT
+};
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_btree_addonly_unbalanced_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_btree_au_internal_validate( struct lfds710_btree_au_state *abs, struct lfds710_misc_validation_info *vi, enum lfds710_misc_validity *lfds710_btree_au_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_btree_au_query( struct lfds710_btree_au_state *baus,
+ enum lfds710_btree_au_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS710_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds710_btree_au_element
+ *baue = NULL;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(baus, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS710_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_btree_au_internal_validate( baus, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_btree_au_internal_validate( struct lfds710_btree_au_state *baus,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_btree_au_validity )
+{
+ lfds710_pal_uint_t
+ number_elements_from_query_tree = 0,
+ number_elements_from_walk = 0;
+
+ struct lfds710_btree_au_element
+ *baue = NULL,
+ *baue_prev = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_btree_au_validity != NULL );
+
+ *lfds710_btree_au_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ /* TRD : validation is performed by;
+
+ performing an in-order walk
+ we should see every element is larger than the preceding element
+ we count elements as we go along (visited elements, that is)
+ and check our tally equals the expected count
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(baus, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ // TRD : baue_prev should always be smaller than or equal to baue
+ if( baue_prev != NULL )
+ if( baus->key_compare_function(baue_prev->key, baue->key) > 0 )
+ {
+ *lfds710_btree_au_validity = LFDS710_MISC_VALIDITY_INVALID_ORDER;
+ return;
+ }
+
+ baue_prev = baue;
+ number_elements_from_walk++;
+ }
+
+ if( *lfds710_btree_au_validity == LFDS710_MISC_VALIDITY_VALID )
+ {
+ lfds710_btree_au_query( (struct lfds710_btree_au_state *) baus, LFDS710_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements_from_query_tree );
+
+ if( number_elements_from_walk > number_elements_from_query_tree )
+ *lfds710_btree_au_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( number_elements_from_walk < number_elements_from_query_tree )
+ *lfds710_btree_au_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ }
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds710_btree_au_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_btree_au_query( (struct lfds710_btree_au_state *) baus, LFDS710_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements_from_query_tree );
+
+ if( number_elements_from_query_tree < vi->min_elements )
+ *lfds710_btree_au_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements_from_query_tree > vi->max_elements )
+ *lfds710_btree_au_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_freelist_cleanup( struct lfds710_freelist_state *fs,
+ void (*element_cleanup_callback)(struct lfds710_freelist_state *fs, struct lfds710_freelist_element *fe) )
+{
+ struct lfds710_freelist_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ {
+ fe = fs->top[POINTER];
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_cleanup_callback( fs, fe_temp );
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_freelist_init_valid_on_current_logical_core( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element * volatile (*elimination_array)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS],
+ lfds710_pal_uint_t elimination_array_size_in_elements,
+ void *user_state )
+{
+ lfds710_pal_uint_t
+ loop,
+ subloop;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) fs->top % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &fs->elimination_array_size_in_elements % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : elimination_array can be NULL
+ LFDS710_PAL_ASSERT( (elimination_array == NULL) or
+ ( (elimination_array != NULL) and (lfds710_pal_uint_t) elimination_array % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 ) );
+ LFDS710_PAL_ASSERT( (elimination_array == NULL and elimination_array_size_in_elements == 0) or
+ (elimination_array != NULL and elimination_array_size_in_elements >= 2 and (elimination_array_size_in_elements & (elimination_array_size_in_elements-1)) == 0) );
+ // TRD : user_state can be NULL
+
+ fs->top[POINTER] = NULL;
+ fs->top[COUNTER] = 0;
+
+ fs->elimination_array = elimination_array;
+ fs->elimination_array_size_in_elements = elimination_array_size_in_elements;
+ fs->user_state = user_state;
+
+ for( loop = 0 ; loop < elimination_array_size_in_elements ; loop++ )
+ for( subloop = 0 ; subloop < LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS ; subloop++ )
+ fs->elimination_array[loop][subloop] = NULL;
+
+ lfds710_misc_internal_backoff_init( &fs->pop_backoff );
+ lfds710_misc_internal_backoff_init( &fs->push_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
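+
+/* usage sketch - illustrative only, not part of the library; the simplest
+   configuration passes a NULL elimination array with a size of 0, which the
+   asserts above permit; if an elimination array is used, it must be aligned
+   to LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES and its element count must be a
+   power of two of at least 2; all names are arbitrary
+
+     {
+       struct lfds710_freelist_state
+         fs;
+
+       struct lfds710_freelist_element
+         fe,
+         *fe_out;
+
+       lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+       lfds710_freelist_push( &fs, &fe, NULL );
+
+       if( lfds710_freelist_pop(&fs, &fe_out, NULL) )
+         ;  // fe_out now points to fe
+     }
+*/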
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+void lfds710_freelist_internal_push_without_ea( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element *fe );
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_freelist_pop( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element **fe,
+ struct lfds710_prng_st_state *psts )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE,
+ elimination_array_index,
+ loop,
+ random_value;
+
+ struct lfds710_freelist_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+ // TRD : psts can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( fs->elimination_array_size_in_elements > 0 )
+ {
+ if( psts != NULL )
+ {
+ LFDS710_PRNG_ST_GENERATE( *psts, random_value );
+ elimination_array_index = ( random_value & (fs->elimination_array_size_in_elements-1) );
+ }
+ else
+ {
+ elimination_array_index = (lfds710_pal_uint_t) fe;
+ LFDS710_PRNG_ST_MIXING_FUNCTION( elimination_array_index );
+ elimination_array_index = ( elimination_array_index & (fs->elimination_array_size_in_elements-1) );
+ }
+
+ // TRD : full scan of one cache line, max pointers per cache line
+
+ *fe = NULL;
+
+ for( loop = 0 ; loop < LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS ; loop++ )
+ if( fs->elimination_array[elimination_array_index][loop] != NULL )
+ {
+ LFDS710_PAL_ATOMIC_EXCHANGE( &fs->elimination_array[elimination_array_index][loop], *fe, struct lfds710_freelist_element * );
+ if( *fe != NULL )
+ return 1;
+ }
+ }
+
+ original_top[COUNTER] = fs->top[COUNTER];
+ original_top[POINTER] = fs->top[POINTER];
+
+ do
+ {
+ if( original_top[POINTER] == NULL )
+ {
+ *fe = NULL;
+ return 0;
+ }
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ new_top[POINTER] = original_top[POINTER]->next;
+
+ LFDS710_PAL_ATOMIC_DWCAS( fs->top, original_top, new_top, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ {
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( fs->pop_backoff, backoff_iteration );
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+ while( result == 0 );
+
+ *fe = original_top[POINTER];
+
+ LFDS710_BACKOFF_AUTOTUNE( fs->pop_backoff, backoff_iteration );
+
+ return 1;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_freelist_push( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element *fe,
+ struct lfds710_prng_st_state *psts )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE,
+ elimination_array_index,
+ loop,
+ random_value;
+
+ struct lfds710_freelist_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+ // TRD : psts can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( fs->elimination_array_size_in_elements > 0 )
+ {
+ if( psts != NULL )
+ {
+ LFDS710_PRNG_ST_GENERATE( *psts, random_value );
+ elimination_array_index = ( random_value & (fs->elimination_array_size_in_elements-1) );
+ }
+ else
+ {
+ elimination_array_index = (lfds710_pal_uint_t) fe;
+ LFDS710_PRNG_ST_MIXING_FUNCTION( elimination_array_index );
+ elimination_array_index = ( elimination_array_index & (fs->elimination_array_size_in_elements-1) );
+ }
+
+ // TRD : full scan of one cache line, max pointers per cache line
+
+ for( loop = 0 ; loop < LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS ; loop++ )
+ if( fs->elimination_array[elimination_array_index][loop] == NULL )
+ {
+ LFDS710_PAL_ATOMIC_EXCHANGE( &fs->elimination_array[elimination_array_index][loop], fe, struct lfds710_freelist_element * );
+ if( fe == NULL )
+ return;
+ }
+ }
+
+ new_top[POINTER] = fe;
+
+ original_top[COUNTER] = fs->top[COUNTER];
+ original_top[POINTER] = fs->top[POINTER];
+
+ do
+ {
+ fe->next = original_top[POINTER];
+ LFDS710_MISC_BARRIER_STORE;
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( fs->top, original_top, new_top, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( fs->push_backoff, backoff_iteration );
+ }
+ while( result == 0 );
+
+ LFDS710_BACKOFF_AUTOTUNE( fs->push_backoff, backoff_iteration );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds710_freelist_internal_push_without_ea( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element *fe )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_freelist_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ new_top[POINTER] = fe;
+
+ original_top[COUNTER] = fs->top[COUNTER];
+ original_top[POINTER] = fs->top[POINTER];
+
+ do
+ {
+ fe->next = original_top[POINTER];
+ LFDS710_MISC_BARRIER_STORE;
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( fs->top, original_top, new_top, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( fs->push_backoff, backoff_iteration );
+ }
+ while( result == 0 );
+
+ LFDS710_BACKOFF_AUTOTUNE( fs->push_backoff, backoff_iteration );
+
+ return;
+}
+
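+
+/****************************************************************************/
+/* usage sketch : a minimal, single-threaded illustration of driving the
+ freelist shown in this file; "struct example_element", the element count,
+ the cleanup call and the init arguments (no elimination array, so the
+ psts argument is NULL) are illustrative assumptions rather than anything
+ defined in this file
+*/
+
+struct example_element
+{
+ struct lfds710_freelist_element fe;
+ int payload;
+};
+
+static void example_freelist_usage( void )
+{
+ static struct lfds710_freelist_state fs;
+ static struct example_element ee[4];
+ struct lfds710_freelist_element *popped_fe;
+ struct example_element *e;
+ int loop;
+
+ // no elimination array in this sketch
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+ for( loop = 0 ; loop < 4 ; loop++ )
+ {
+ ee[loop].payload = loop;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( ee[loop].fe, &ee[loop] );
+ lfds710_freelist_push( &fs, &ee[loop].fe, NULL );
+ }
+
+ // pop returns 1 on success and 0 when the freelist is empty
+ while( lfds710_freelist_pop(&fs, &popped_fe, NULL) )
+ {
+ e = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *popped_fe );
+ (void) e->payload;
+ }
+
+ lfds710_freelist_cleanup( &fs, NULL );
+}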
--- /dev/null
+/***** includes *****/
+#include "lfds710_freelist_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_freelist_internal_freelist_validate( struct lfds710_freelist_state *fs,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_freelist_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_freelist_query( struct lfds710_freelist_state *fs,
+ enum lfds710_freelist_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ struct lfds710_freelist_element
+ *fe;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS710_FREELIST_QUERY_SINGLETHREADED_GET_COUNT:
+ {
+ lfds710_pal_uint_t
+ loop,
+ subloop;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ // TRD : count the elements in the elimination array
+ for( loop = 0 ; loop < fs->elimination_array_size_in_elements ; loop++ )
+ for( subloop = 0 ; subloop < LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS ; subloop++ )
+ if( fs->elimination_array[loop][subloop] != NULL )
+ ( *(lfds710_pal_uint_t *) query_output )++;
+
+ // TRD : count the elements on the freelist
+ fe = (struct lfds710_freelist_element *) fs->top[POINTER];
+
+ while( fe != NULL )
+ {
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ fe = (struct lfds710_freelist_element *) fe->next;
+ }
+ }
+ break;
+
+ case LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_freelist_internal_freelist_validate( fs, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+
+ case LFDS710_FREELIST_QUERY_GET_ELIMINATION_ARRAY_EXTRA_ELEMENTS_IN_FREELIST_ELEMENTS:
+ {
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ ( *(lfds710_pal_uint_t *) query_output ) = (fs->elimination_array_size_in_elements-1) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS;
+ }
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_freelist_internal_freelist_validate( struct lfds710_freelist_state *fs,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_freelist_validity )
+{
+ lfds710_pal_uint_t
+ number_elements = 0;
+
+ struct lfds710_freelist_element
+ *fe_slow,
+ *fe_fast;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_freelist_validity != NULL );
+
+ *lfds710_freelist_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ fe_slow = fe_fast = (struct lfds710_freelist_element *) fs->top[POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the freelist
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the freelist)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( fe_slow != NULL )
+ do
+ {
+ fe_slow = fe_slow->next;
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next;
+
+ if( fe_fast != NULL )
+ fe_fast = fe_fast->next;
+ }
+ while( fe_slow != NULL and fe_fast != fe_slow );
+
+ if( fe_fast != NULL and fe_slow != NULL and fe_fast == fe_slow )
+ *lfds710_freelist_validity = LFDS710_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds710_freelist_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_freelist_query( fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_freelist_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_freelist_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_hash_addonly_internal.h"
+
+/***** private prototypes *****/
+static void btree_au_element_cleanup_function( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element *baue );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_hash_a_cleanup( struct lfds710_hash_a_state *has,
+ void (*element_cleanup_callback)(struct lfds710_hash_a_state *has, struct lfds710_hash_a_element *hae) )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( has != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ has->element_cleanup_callback = element_cleanup_callback;
+
+ for( loop = 0 ; loop < has->array_size ; loop++ )
+ lfds710_btree_au_cleanup( has->baus_array+loop, btree_au_element_cleanup_function );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void btree_au_element_cleanup_function( struct lfds710_btree_au_state *baus,
+ struct lfds710_btree_au_element *baue )
+{
+ struct lfds710_hash_a_state
+ *has;
+
+ struct lfds710_hash_a_element
+ *hae;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ hae = (struct lfds710_hash_a_element *) LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+ has = (struct lfds710_hash_a_state *) LFDS710_BTREE_AU_GET_USER_STATE_FROM_STATE( *baus );
+
+ has->element_cleanup_callback( has, hae );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_hash_a_get_by_key( struct lfds710_hash_a_state *has,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void (*key_hash_function)(void const *key, lfds710_pal_uint_t *hash),
+ void *key,
+ struct lfds710_hash_a_element **hae )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ hash = 0;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ LFDS710_PAL_ASSERT( has != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key_hash_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( hae != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = has->key_compare_function;
+
+ if( key_hash_function == NULL )
+ key_hash_function = has->key_hash_function;
+
+ key_hash_function( key, &hash );
+
+ rv = lfds710_btree_au_get_by_key( has->baus_array + (hash % has->array_size), key_compare_function, key, &baue );
+
+ if( rv == 1 )
+ *hae = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+ else
+ *hae = NULL;
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_hash_a_init_valid_on_current_logical_core( struct lfds710_hash_a_state *has,
+ struct lfds710_btree_au_state *baus_array,
+ lfds710_pal_uint_t array_size,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void (*key_hash_function)(void const *key, lfds710_pal_uint_t *hash),
+ enum lfds710_hash_a_existing_key existing_key,
+ void *user_state )
+{
+ enum lfds710_btree_au_existing_key
+ btree_au_existing_key = LFDS710_BTREE_AU_EXISTING_KEY_OVERWRITE; // TRD : for compiler warning
+
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( has != NULL );
+ LFDS710_PAL_ASSERT( baus_array != NULL );
+ LFDS710_PAL_ASSERT( array_size > 0 );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ LFDS710_PAL_ASSERT( key_hash_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ has->array_size = array_size;
+ has->key_compare_function = key_compare_function;
+ has->key_hash_function = key_hash_function;
+ has->existing_key = existing_key;
+ has->baus_array = baus_array;
+ has->user_state = user_state;
+
+ if( has->existing_key == LFDS710_HASH_A_EXISTING_KEY_OVERWRITE )
+ btree_au_existing_key = LFDS710_BTREE_AU_EXISTING_KEY_OVERWRITE;
+
+ if( has->existing_key == LFDS710_HASH_A_EXISTING_KEY_FAIL )
+ btree_au_existing_key = LFDS710_BTREE_AU_EXISTING_KEY_FAIL;
+
+ // TRD : since the addonly_hash atomic counts, if that flag is set, the btree_addonly_unbalanceds don't have to
+ for( loop = 0 ; loop < array_size ; loop++ )
+ lfds710_btree_au_init_valid_on_current_logical_core( has->baus_array+loop, key_compare_function, btree_au_existing_key, user_state );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+enum lfds710_hash_a_insert_result lfds710_hash_a_insert( struct lfds710_hash_a_state *has,
+ struct lfds710_hash_a_element *hae,
+ struct lfds710_hash_a_element **existing_hae )
+{
+ enum lfds710_hash_a_insert_result
+ apr = LFDS710_HASH_A_PUT_RESULT_SUCCESS;
+
+ enum lfds710_btree_au_insert_result
+ alr;
+
+ lfds710_pal_uint_t
+ hash = 0;
+
+ struct lfds710_btree_au_element
+ *existing_baue;
+
+ LFDS710_PAL_ASSERT( has != NULL );
+ LFDS710_PAL_ASSERT( hae != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &hae->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ // TRD : existing_hae can be NULL
+
+ // TRD : alignment checks
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &hae->baue % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+
+ has->key_hash_function( hae->key, &hash );
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( hae->baue, hae->key );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( hae->baue, hae );
+
+ alr = lfds710_btree_au_insert( has->baus_array + (hash % has->array_size), &hae->baue, &existing_baue );
+
+ switch( alr )
+ {
+ case LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY:
+ if( existing_hae != NULL )
+ *existing_hae = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *existing_baue );
+
+ apr = LFDS710_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+
+ case LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE:
+ apr = LFDS710_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS:
+ apr = LFDS710_HASH_A_PUT_RESULT_SUCCESS;
+ break;
+ }
+
+ return apr;
+}
+
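+
+/****************************************************************************/
+/* usage sketch : a minimal, single-threaded illustration of the add-only hash
+ API in this file; the compare and hash functions, the table size, the
+ integer-cast-to-pointer keys and the SET_KEY/SET_VALUE/GET_VALUE element
+ macros are illustrative assumptions about the public header
+*/
+
+static int example_key_compare( void const *new_key, void const *existing_key )
+{
+ lfds710_pal_uint_t a = (lfds710_pal_uint_t) new_key, b = (lfds710_pal_uint_t) existing_key;
+
+ return (a > b) - (a < b);
+}
+
+static void example_key_hash( void const *key, lfds710_pal_uint_t *hash )
+{
+ // a deliberately trivial hash, good enough for a sketch
+ *hash = (lfds710_pal_uint_t) key;
+
+ return;
+}
+
+static void example_hash_a_usage( void )
+{
+ static struct lfds710_btree_au_state baus_array[16];
+ static struct lfds710_hash_a_state has;
+ static struct lfds710_hash_a_element hae;
+ struct lfds710_hash_a_element *found_hae;
+
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus_array, 16, example_key_compare, example_key_hash, LFDS710_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hae, (void *) 42 );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hae, (void *) 0 );
+ lfds710_hash_a_insert( &has, &hae, NULL );
+
+ // passing NULL for the compare and hash functions falls back to those given at init time
+ if( lfds710_hash_a_get_by_key(&has, NULL, NULL, (void *) 42, &found_hae) == 1 )
+ (void) LFDS710_HASH_A_GET_VALUE_FROM_ELEMENT( *found_hae );
+}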
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_hash_addonly_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_hash_a_iterate_init( struct lfds710_hash_a_state *has,
+ struct lfds710_hash_a_iterate *hai )
+{
+ LFDS710_PAL_ASSERT( has != NULL );
+ LFDS710_PAL_ASSERT( hai != NULL );
+
+ hai->baus = has->baus_array;
+ hai->baus_end = has->baus_array + has->array_size;
+ hai->baue = NULL;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int lfds710_hash_a_iterate( struct lfds710_hash_a_iterate *hai,
+ struct lfds710_hash_a_element **hae )
+{
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ int
+ rv = 0;
+
+ LFDS710_PAL_ASSERT( hai != NULL );
+ LFDS710_PAL_ASSERT( hae != NULL );
+
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position( hai->baus, &hai->baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE );
+
+ if( hai->baue != NULL )
+ {
+ *hae = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *hai->baue );
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ rv = 1;
+ }
+
+ if( hai->baue == NULL )
+ if( ++hai->baus == hai->baus_end )
+ {
+ *hae = NULL;
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+ }
+
+ return rv;
+}
+
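+
+/****************************************************************************/
+/* usage sketch : counting every element in a populated hash by iteration,
+ single-threaded; the function name is illustrative only - everything it
+ calls is taken from this file
+*/
+
+static lfds710_pal_uint_t example_hash_a_count_elements( struct lfds710_hash_a_state *has )
+{
+ struct lfds710_hash_a_iterate hai;
+ struct lfds710_hash_a_element *hae;
+ lfds710_pal_uint_t count = 0;
+
+ lfds710_hash_a_iterate_init( has, &hai );
+
+ // each successful call returns the next element; elements come back in no particular key order
+ while( lfds710_hash_a_iterate(&hai, &hae) )
+ count++;
+
+ return count;
+}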
--- /dev/null
+/***** includes *****/
+#include "lfds710_hash_addonly_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_hash_a_internal_validate( struct lfds710_hash_a_state *has,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_hash_a_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_hash_a_query( struct lfds710_hash_a_state *has,
+ enum lfds710_hash_a_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( has != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS710_HASH_A_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds710_hash_a_iterate
+ ai;
+
+ struct lfds710_hash_a_element
+ *hae;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ lfds710_hash_a_iterate_init( has, &ai );
+
+ while( lfds710_hash_a_iterate(&ai, &hae) )
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS710_HASH_A_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_hash_a_internal_validate( has, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_hash_a_internal_validate( struct lfds710_hash_a_state *has,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_hash_a_validity )
+{
+ lfds710_pal_uint_t
+ lfds710_hash_a_total_number_elements = 0,
+ lfds710_btree_au_total_number_elements = 0,
+ number_elements;
+
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( has != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_hash_a_validity != NULL );
+
+ /* TRD : validate every btree_addonly_unbalanced in the addonly_hash
+ sum elements in each btree_addonly_unbalanced
+ check matches expected element counts (if vi is provided)
+ */
+
+ *lfds710_hash_a_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ for( loop = 0 ; *lfds710_hash_a_validity == LFDS710_MISC_VALIDITY_VALID and loop < has->array_size ; loop++ )
+ lfds710_btree_au_query( has->baus_array+loop, LFDS710_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, NULL, (void *) lfds710_hash_a_validity );
+
+ if( *lfds710_hash_a_validity == LFDS710_MISC_VALIDITY_VALID )
+ {
+ for( loop = 0 ; loop < has->array_size ; loop++ )
+ {
+ lfds710_btree_au_query( has->baus_array+loop, LFDS710_BTREE_AU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+ lfds710_btree_au_total_number_elements += number_elements;
+ }
+
+ // TRD : first, check btree_addonly_unbalanced total vs the addonly_hash total
+ lfds710_hash_a_query( has, LFDS710_HASH_A_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &lfds710_hash_a_total_number_elements );
+
+ // TRD : the btree_addonly_unbalanceds are assumed to speak the truth
+ if( lfds710_hash_a_total_number_elements < lfds710_btree_au_total_number_elements )
+ *lfds710_hash_a_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( lfds710_hash_a_total_number_elements > lfds710_btree_au_total_number_elements )
+ *lfds710_hash_a_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ // TRD : second, if we're still valid and vi is provided, check the btree_addonly_unbalanced total against vi
+ if( *lfds710_hash_a_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ if( lfds710_btree_au_total_number_elements < vi->min_elements )
+ *lfds710_hash_a_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( lfds710_btree_au_total_number_elements > vi->max_elements )
+ *lfds710_hash_a_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_ordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_aso_cleanup( struct lfds710_list_aso_state *lasos,
+ void (*element_cleanup_callback)(struct lfds710_list_aso_state *lasos, struct lfds710_list_aso_element *lasoe) )
+{
+ struct lfds710_list_aso_element
+ *lasoe,
+ *temp;
+
+ LFDS710_PAL_ASSERT( lasos != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ lasoe = LFDS710_LIST_ASO_GET_START( *lasos );
+
+ while( lasoe != NULL )
+ {
+ temp = lasoe;
+
+ lasoe = LFDS710_LIST_ASO_GET_NEXT( *lasoe );
+
+ element_cleanup_callback( lasos, temp );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_ordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_list_aso_get_by_key( struct lfds710_list_aso_state *lasos,
+ void *key,
+ struct lfds710_list_aso_element **lasoe )
+{
+ int
+ cr = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( lasos != NULL );
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( lasoe != NULL );
+
+ *lasoe = NULL;
+
+ while( cr != 0 and LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*lasos, *lasoe) )
+ cr = lasos->key_compare_function( key, (*lasoe)->key );
+
+ if( *lasoe == NULL )
+ rv = 0;
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_ordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_aso_init_valid_on_current_logical_core( struct lfds710_list_aso_state *lasos,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum lfds710_list_aso_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( lasos != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasos->dummy_element % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasos->start % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ // TRD : dummy start element - makes code easier when you can always use ->next
+ lasos->start = &lasos->dummy_element;
+
+ lasos->start->next = NULL;
+ lasos->start->value = NULL;
+ lasos->key_compare_function = key_compare_function;
+ lasos->existing_key = existing_key;
+ lasos->user_state = user_state;
+
+ lfds710_misc_internal_backoff_init( &lasos->insert_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_ordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+enum lfds710_list_aso_insert_result lfds710_list_aso_insert( struct lfds710_list_aso_state *lasos,
+ struct lfds710_list_aso_element *lasoe,
+ struct lfds710_list_aso_element **existing_lasoe )
+{
+ char unsigned
+ result;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ int
+ compare_result = 0;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_list_aso_element
+ *volatile lasoe_temp = NULL,
+ *volatile lasoe_trailing;
+
+ LFDS710_PAL_ASSERT( lasos != NULL );
+ LFDS710_PAL_ASSERT( lasoe != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasoe->next % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasoe->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ // TRD : existing_lasoe can be NULL
+
+ /* TRD : imagine a list, sorted small to large
+
+ we arrive at an element
+ we obtain its next pointer
+ we check we are greater than the current element and smaller than the next element
+ this means we have found the correct location to insert
+ we try to CAS ourselves in; in the meantime,
+ someone else has *already* swapped in an element which is smaller than we are
+
+ e.g.
+
+ the list is { 1, 10 } and we are the value 5
+
+ we arrive at 1; we check the next element and see it is 10
+ so we are larger than the current element and smaller than the next
+ we are in the correct location to insert and we go to insert...
+
+ in the meantime, someone else with the value 3 comes along
+ he too finds this is the correct location and inserts before we do
+ the list is now { 1, 3, 10 } and we are trying to insert now after
+ 1 and before 3!
+
+ our insert CAS fails, because the next pointer of 1 has already changed;
+ but we see we are in the wrong location - we need to move forward an
+ element
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : we need to begin with the leading dummy element
+ as the element to be inserted
+ may be smaller than all elements in the list
+ */
+
+ lasoe_trailing = lasos->start;
+ lasoe_temp = lasos->start->next;
+
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ if( lasoe_temp == NULL )
+ compare_result = -1;
+
+ if( lasoe_temp != NULL )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ compare_result = lasos->key_compare_function( lasoe->key, lasoe_temp->key );
+ }
+
+ if( compare_result == 0 )
+ {
+ if( existing_lasoe != NULL )
+ *existing_lasoe = lasoe_temp;
+
+ switch( lasos->existing_key )
+ {
+ case LFDS710_LIST_ASO_EXISTING_KEY_OVERWRITE:
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( *lasoe_temp, lasoe->value );
+ return LFDS710_LIST_ASO_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LFDS710_LIST_ASO_EXISTING_KEY_FAIL:
+ return LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( compare_result < 0 )
+ {
+ lasoe->next = lasoe_temp;
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &lasoe_trailing->next, (struct lfds710_list_aso_element **) &lasoe->next, lasoe, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 1 )
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ else
+ {
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( lasos->insert_backoff, backoff_iteration );
+ // TRD : if we fail to link, someone else has linked and so we need to re-determine whether our position is still correct
+ lasoe_temp = lasoe_trailing->next;
+ }
+ }
+
+ if( compare_result > 0 )
+ {
+ // TRD : move trailing along by one element
+ lasoe_trailing = lasoe_trailing->next;
+
+ /* TRD : set temp as the element after trailing
+ if the new element we're linking is larger than all elements in the list,
+ lasoe_temp will now go to NULL and we'll link at the end
+ */
+ lasoe_temp = lasoe_trailing->next;
+ }
+ }
+
+ LFDS710_BACKOFF_AUTOTUNE( lasos->insert_backoff, backoff_iteration );
+
+ return LFDS710_LIST_ASO_INSERT_RESULT_SUCCESS;
+}
+
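+
+/****************************************************************************/
+/* usage sketch : a minimal, single-threaded illustration of inserting into
+ the ordered list and then looking an element up by key; the compare
+ function, the integer-cast-to-pointer keys and the SET_KEY/SET_VALUE
+ element macros are illustrative assumptions about the public header
+*/
+
+static int example_aso_key_compare( void const *new_key, void const *existing_key )
+{
+ lfds710_pal_uint_t a = (lfds710_pal_uint_t) new_key, b = (lfds710_pal_uint_t) existing_key;
+
+ return (a > b) - (a < b);
+}
+
+static void example_list_aso_usage( void )
+{
+ static struct lfds710_list_aso_state lasos;
+ static struct lfds710_list_aso_element lasoe[3];
+ struct lfds710_list_aso_element *found_lasoe;
+ lfds710_pal_uint_t loop;
+
+ lfds710_list_aso_init_valid_on_current_logical_core( &lasos, example_aso_key_compare, LFDS710_LIST_ASO_EXISTING_KEY_FAIL, NULL );
+
+ // insert keys 3, 2, 1 - the list keeps them ordered by the compare function regardless of insert order
+ for( loop = 0 ; loop < 3 ; loop++ )
+ {
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( lasoe[loop], (void *) (3 - loop) );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( lasoe[loop], (void *) loop );
+ lfds710_list_aso_insert( &lasos, &lasoe[loop], NULL );
+ }
+
+ // returns 1 and sets found_lasoe on a hit, returns 0 on a miss
+ if( lfds710_list_aso_get_by_key(&lasos, (void *) 2, &found_lasoe) == 1 )
+ (void) found_lasoe;
+}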
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_ordered_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_list_aso_internal_validate( struct lfds710_list_aso_state *lasos,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_list_aso_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_aso_query( struct lfds710_list_aso_state *lasos,
+ enum lfds710_list_aso_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( lasos != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds710_list_aso_element
+ *lasoe = NULL;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*lasos, lasoe) )
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS710_LIST_ASO_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_list_aso_internal_validate( lasos, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_list_aso_internal_validate( struct lfds710_list_aso_state *lasos,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_list_aso_validity )
+{
+ lfds710_pal_uint_t
+ number_elements = 0;
+
+ struct lfds710_list_aso_element
+ *lasoe_fast,
+ *lasoe_slow;
+
+ LFDS710_PAL_ASSERT( lasos != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_list_aso_validity != NULL );
+
+ *lfds710_list_aso_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ lasoe_slow = lasoe_fast = lasos->start->next;
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the start of the list
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the list)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( lasoe_slow != NULL )
+ do
+ {
+ lasoe_slow = lasoe_slow->next;
+
+ if( lasoe_fast != NULL )
+ lasoe_fast = lasoe_fast->next;
+
+ if( lasoe_fast != NULL )
+ lasoe_fast = lasoe_fast->next;
+ }
+ while( lasoe_slow != NULL and lasoe_fast != lasoe_slow );
+
+ if( lasoe_fast != NULL and lasoe_slow != NULL and lasoe_fast == lasoe_slow )
+ *lfds710_list_aso_validity = LFDS710_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds710_list_aso_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_list_aso_query( lasos, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_list_aso_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_list_aso_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_asu_cleanup( struct lfds710_list_asu_state *lasus,
+ void (*element_cleanup_callback)(struct lfds710_list_asu_state *lasus, struct lfds710_list_asu_element *lasue) )
+{
+ struct lfds710_list_asu_element
+ *lasue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback == NULL )
+ return;
+
+ lasue = LFDS710_LIST_ASU_GET_START( *lasus );
+
+ while( lasue != NULL )
+ {
+ temp = lasue;
+
+ lasue = LFDS710_LIST_ASU_GET_NEXT( *lasue );
+
+ element_cleanup_callback( lasus, temp );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_list_asu_get_by_key( struct lfds710_list_asu_state *lasus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct lfds710_list_asu_element **lasue )
+{
+ int
+ cr = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( lasue != NULL );
+
+ *lasue = NULL;
+
+ while( cr != 0 and LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*lasus, *lasue) )
+ cr = key_compare_function( key, (*lasue)->key );
+
+ if( *lasue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_asu_init_valid_on_current_logical_core( struct lfds710_list_asu_state *lasus,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasus->dummy_element % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasus->end % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasus->start % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : user_state can be NULL
+
+ // TRD : dummy start element - makes code easier when you can always use ->next
+ lasus->start = lasus->end = &lasus->dummy_element;
+
+ lasus->start->next = NULL;
+ lasus->start->value = NULL;
+ lasus->user_state = user_state;
+
+ lfds710_misc_internal_backoff_init( &lasus->after_backoff );
+ lfds710_misc_internal_backoff_init( &lasus->start_backoff );
+ lfds710_misc_internal_backoff_init( &lasus->end_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_unordered_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_asu_insert_at_position( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue,
+ struct lfds710_list_asu_element *lasue_predecessor,
+ enum lfds710_list_asu_position position )
+{
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ LFDS710_PAL_ASSERT( lasue != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->next % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ // TRD : lasue_predecessor is asserted in lfds710_list_asu_insert_after_element, the only case which uses it
+ // TRD : position can be any value in its range
+
+ switch( position )
+ {
+ case LFDS710_LIST_ASU_POSITION_START:
+ lfds710_list_asu_insert_at_start( lasus, lasue );
+ break;
+
+ case LFDS710_LIST_ASU_POSITION_END:
+ lfds710_list_asu_insert_at_end( lasus, lasue );
+ break;
+
+ case LFDS710_LIST_ASU_POSITION_AFTER:
+ lfds710_list_asu_insert_after_element( lasus, lasue, lasue_predecessor );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_asu_insert_at_start( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ LFDS710_PAL_ASSERT( lasue != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->next % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lasue->next = lasus->start->next;
+
+ do
+ {
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &lasus->start->next, (struct lfds710_list_asu_element **) &lasue->next, lasue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( lasus->start_backoff, backoff_iteration );
+ }
+ while( result == 0 );
+
+ LFDS710_BACKOFF_AUTOTUNE( lasus->start_backoff, backoff_iteration );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_asu_insert_at_end( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue )
+{
+ char unsigned
+ result;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_list_asu_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ *compare;
+
+ struct lfds710_list_asu_element
+ *volatile lasue_next,
+ *volatile lasue_end;
+
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ LFDS710_PAL_ASSERT( lasue != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->next % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+
+ /* TRD : begin by assuming end is correctly pointing to the final element
+ try to link (comparing for next being NULL)
+ if we fail, move down list till we find last element
+ and retry
+ when successful, update end to ourselves
+
+ note there's a leading dummy element
+ so lasus->end always points to an element
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lasue->next = NULL;
+ lasue_end = lasus->end;
+
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ compare = NULL;
+
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &lasue_end->next, &compare, lasue, LFDS710_MISC_CAS_STRENGTH_STRONG, result );
+
+ if( result == 1 )
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ else
+ {
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( lasus->end_backoff, backoff_iteration );
+
+ lasue_end = compare;
+ lasue_next = LFDS710_LIST_ASU_GET_NEXT( *lasue_end );
+
+ while( lasue_next != NULL )
+ {
+ lasue_end = lasue_next;
+ lasue_next = LFDS710_LIST_ASU_GET_NEXT( *lasue_end );
+ }
+ }
+ }
+
+ lasus->end = lasue;
+
+ LFDS710_BACKOFF_AUTOTUNE( lasus->end_backoff, backoff_iteration );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds710_list_asu_insert_after_element( struct lfds710_list_asu_state *lasus,
+ struct lfds710_list_asu_element *lasue,
+ struct lfds710_list_asu_element *lasue_predecessor )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ LFDS710_PAL_ASSERT( lasue != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->next % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &lasue->value % LFDS710_PAL_ALIGN_SINGLE_POINTER == 0 );
+ LFDS710_PAL_ASSERT( lasue_predecessor != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lasue->next = lasue_predecessor->next;
+
+ do
+ {
+ LFDS710_MISC_BARRIER_STORE;
+ LFDS710_PAL_ATOMIC_CAS( &lasue_predecessor->next, (struct lfds710_list_asu_element **) &lasue->next, lasue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( lasus->after_backoff, backoff_iteration );
+ }
+ while( result == 0 );
+
+ LFDS710_BACKOFF_AUTOTUNE( lasus->after_backoff, backoff_iteration );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
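+
+/****************************************************************************/
+/* usage sketch : a minimal, single-threaded illustration of the unordered
+ list; the values and the SET_VALUE element macro are illustrative
+ assumptions about the public header - the insert calls and the
+ GET_START_AND_THEN_NEXT walking macro appear in this directory
+*/
+
+static void example_list_asu_usage( void )
+{
+ static struct lfds710_list_asu_state lasus;
+ static struct lfds710_list_asu_element lasue[2];
+ struct lfds710_list_asu_element *walker = NULL;
+ lfds710_pal_uint_t count = 0;
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &lasus, NULL );
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lasue[0], (void *) 1 );
+ lfds710_list_asu_insert_at_start( &lasus, &lasue[0] );
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lasue[1], (void *) 2 );
+ lfds710_list_asu_insert_at_end( &lasus, &lasue[1] );
+
+ // the walking macro begins from the start of the list when the walker pointer is NULL
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, walker) )
+ count++;
+
+ (void) count;
+}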
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_list_addonly_singlylinked_unordered_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_list_asu_internal_validate( struct lfds710_list_asu_state *lasus,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_list_asu_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_list_asu_query( struct lfds710_list_asu_state *lasus,
+ enum lfds710_list_asu_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*lasus, lasue) )
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ }
+ break;
+
+ case LFDS710_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_list_asu_internal_validate( lasus, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_list_asu_internal_validate( struct lfds710_list_asu_state *lasus,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_list_asu_validity )
+{
+ lfds710_pal_uint_t
+ number_elements = 0;
+
+ struct lfds710_list_asu_element
+ *lasue_fast,
+ *lasue_slow;
+
+ LFDS710_PAL_ASSERT( lasus != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_list_asu_validity != NULL );
+
+ *lfds710_list_asu_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ lasue_slow = lasue_fast = lasus->start->next;
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the start of the list
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the list)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( lasue_slow != NULL )
+ do
+ {
+ lasue_slow = lasue_slow->next;
+
+ if( lasue_fast != NULL )
+ lasue_fast = lasue_fast->next;
+
+ if( lasue_fast != NULL )
+ lasue_fast = lasue_fast->next;
+ }
+ while( lasue_slow != NULL and lasue_fast != lasue_slow );
+
+ if( lasue_fast != NULL and lasue_slow != NULL and lasue_fast == lasue_slow )
+ *lfds710_list_asu_validity = LFDS710_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds710_list_asu_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_list_asu_query( lasus, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_list_asu_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_list_asu_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+struct lfds710_misc_globals
+ lfds710_misc_globals =
+ {
+ { LFDS710_PRNG_SEED }
+ };
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+void lfds710_misc_prng_internal_big_slow_high_quality_init( int long long unsigned seed );
--- /dev/null
+/***** includes *****/
+#include "lfds710_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_misc_internal_backoff_init( struct lfds710_misc_backoff_state *bs )
+{
+ LFDS710_PAL_ASSERT( bs != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &bs->lock % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+
+ bs->lock = LFDS710_MISC_FLAG_LOWERED;
+ bs->backoff_iteration_frequency_counters[0] = 0;
+ bs->backoff_iteration_frequency_counters[1] = 0;
+ bs->metric = 1;
+ bs->total_operations = 0;
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void lfds710_misc_query( enum lfds710_misc_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+ // TRD : query_output can be NULL in some cases
+
+ switch( query_type )
+ {
+ case LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING:
+ {
+ char static const
+ * const build_and_version_string = "liblfds " LFDS710_MISC_VERSION_STRING " (" BUILD_TYPE_STRING ", " LFDS710_PAL_OS_STRING ", " MODE_TYPE_STRING ", " LFDS710_PAL_PROCESSOR_STRING ", " LFDS710_PAL_COMPILER_STRING ")";
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(char const **) query_output = build_and_version_string;
+ }
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
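+
+/****************************************************************************/
+/* usage sketch : obtaining the build and version string via the query above;
+ the local variable name is illustrative only
+*/
+
+static void example_misc_query_usage( void )
+{
+ char const *build_and_version_string;
+
+ lfds710_misc_query( LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void *) &build_and_version_string );
+
+ (void) build_and_version_string;
+}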
--- /dev/null
+/***** includes *****/
+#include "lfds710_prng_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_prng_init_valid_on_current_logical_core( struct lfds710_prng_state *ps, lfds710_pal_uint_t seed )
+{
+ LFDS710_PAL_ASSERT( ps != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &ps->entropy % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : seed can be any value in its range (unlike for the mixing function)
+
+ LFDS710_PRNG_ST_MIXING_FUNCTION( seed );
+
+ ps->entropy = seed;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void lfds710_prng_st_init( struct lfds710_prng_st_state *psts, lfds710_pal_uint_t seed )
+{
+ LFDS710_PAL_ASSERT( psts != NULL );
+ LFDS710_PAL_ASSERT( seed != 0 );
+
+ LFDS710_PRNG_ST_MIXING_FUNCTION( seed );
+
+ psts->entropy = seed;
+
+ return;
+}
+
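+
+/****************************************************************************/
+/* usage sketch : seeding the single-threaded PRNG and drawing a value from
+ it; LFDS710_PRNG_ST_GENERATE is the macro used elsewhere in the library
+ (for example by the freelist elimination layer) and the seed here is
+ illustrative only - it must simply be non-zero
+*/
+
+static void example_prng_st_usage( void )
+{
+ struct lfds710_prng_st_state psts;
+ lfds710_pal_uint_t random_value;
+
+ lfds710_prng_st_init( &psts, 12345 );
+
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+
+ (void) random_value;
+}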
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_bmm_cleanup( struct lfds710_queue_bmm_state *qbmms,
+ void (*element_cleanup_callback)(struct lfds710_queue_bmm_state *qbmms, void *key, void *value) )
+{
+ void
+ *key,
+ *value;
+
+ LFDS710_PAL_ASSERT( qbmms != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( lfds710_queue_bmm_dequeue(qbmms,&key,&value) )
+ element_cleanup_callback( qbmms, key, value );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_queue_bmm_dequeue( struct lfds710_queue_bmm_state *qbmms,
+ void **key,
+ void **value )
+{
+ char unsigned
+ result;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ int
+ rv = 1;
+
+ lfds710_pal_uint_t
+ read_index,
+ sequence_number;
+
+ lfds710_pal_int_t
+ difference;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_queue_bmm_element
+ *qbmme = NULL;
+
+ LFDS710_PAL_ASSERT( qbmms != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ read_index = qbmms->read_index;
+
+ /* the per-slot sequence numbers tell us the state of each slot
+ difference == 0 : the slot holds an element for this read index, so we try to claim it
+ difference < 0 : the producer has not yet written this slot, so from our point of view the queue is empty
+ difference > 0 : another consumer has already claimed this slot, so we reload the read index and retry
+ */
+
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ qbmme = &qbmms->element_array[ read_index & qbmms->mask ];
+ LFDS710_MISC_BARRIER_LOAD;
+ sequence_number = qbmme->sequence_number;
+ difference = (lfds710_pal_int_t) sequence_number - (lfds710_pal_int_t) (read_index + 1);
+
+ if( difference == 0 )
+ {
+ LFDS710_PAL_ATOMIC_CAS( &qbmms->read_index, &read_index, read_index + 1, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( qbmms->dequeue_backoff, backoff_iteration );
+ if( result == 1 )
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( difference < 0 )
+ {
+ rv = 0;
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( difference > 0 )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ read_index = qbmms->read_index;
+ }
+ }
+
+ if( rv == 1 )
+ {
+ if( key != NULL )
+ *key = qbmme->key;
+ if( value != NULL )
+ *value = qbmme->value;
+ LFDS710_MISC_BARRIER_STORE;
+ qbmme->sequence_number = read_index + qbmms->mask + 1;
+ }
+
+ LFDS710_BACKOFF_AUTOTUNE( qbmms->dequeue_backoff, backoff_iteration );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_queue_bmm_enqueue( struct lfds710_queue_bmm_state *qbmms,
+ void *key,
+ void *value )
+{
+ char unsigned
+ result;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ int
+ rv = 1;
+
+ lfds710_pal_uint_t
+ sequence_number,
+ write_index;
+
+ lfds710_pal_int_t
+ difference;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_queue_bmm_element
+ *qbmme = NULL;
+
+ LFDS710_PAL_ASSERT( qbmms != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ write_index = qbmms->write_index;
+
+ /* the per-slot sequence numbers tell us the state of each slot
+ difference == 0 : the slot is free for this write index, so we try to claim it
+ difference < 0 : the slot still holds an element from the previous lap which has not yet been dequeued, so the queue is full
+ difference > 0 : another producer has already claimed this slot, so we reload the write index and retry
+ */
+
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED )
+ {
+ qbmme = &qbmms->element_array[ write_index & qbmms->mask ];
+ LFDS710_MISC_BARRIER_LOAD;
+ sequence_number = qbmme->sequence_number;
+ difference = (lfds710_pal_int_t) sequence_number - (lfds710_pal_int_t) write_index;
+
+ if( difference == 0 )
+ {
+ LFDS710_PAL_ATOMIC_CAS( &qbmms->write_index, &write_index, write_index + 1, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( qbmms->enqueue_backoff, backoff_iteration );
+ if( result == 1 )
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( difference < 0 )
+ {
+ rv = 0;
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+
+ if( difference > 0 )
+ {
+ LFDS710_MISC_BARRIER_LOAD;
+ write_index = qbmms->write_index;
+ }
+ }
+
+ if( rv == 1 )
+ {
+ qbmme->key = key;
+ qbmme->value = value;
+ LFDS710_MISC_BARRIER_STORE;
+ qbmme->sequence_number = write_index + 1;
+ }
+
+ LFDS710_BACKOFF_AUTOTUNE( qbmms->enqueue_backoff, backoff_iteration );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_bmm_init_valid_on_current_logical_core( struct lfds710_queue_bmm_state *qbmms,
+ struct lfds710_queue_bmm_element *element_array,
+ lfds710_pal_uint_t number_elements,
+ void *user_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( qbmms != NULL );
+ LFDS710_PAL_ASSERT( element_array != NULL );
+ LFDS710_PAL_ASSERT( number_elements >= 2 );
+ LFDS710_PAL_ASSERT( ( number_elements & (number_elements-1) ) == 0 ); // TRD : number_elements must be a positive integer power of 2
+ // TRD : user_state can be NULL
+
+ qbmms->number_elements = number_elements;
+ qbmms->mask = qbmms->number_elements - 1;
+ qbmms->read_index = 0;
+ qbmms->write_index = 0;
+ qbmms->element_array = element_array;
+ qbmms->user_state = user_state;
+
+ for( loop = 0 ; loop < qbmms->number_elements ; loop++ )
+ qbmms->element_array[loop].sequence_number = loop;
+
+ lfds710_misc_internal_backoff_init( &qbmms->dequeue_backoff );
+ lfds710_misc_internal_backoff_init( &qbmms->enqueue_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
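+
+/****************************************************************************/
+/* usage sketch : a minimal, single-threaded illustration of the bounded
+ many-producer many-consumer queue; the element count (which must be a
+ power of two, as asserted above) and the key/value integers are
+ illustrative only
+*/
+
+static void example_queue_bmm_usage( void )
+{
+ static struct lfds710_queue_bmm_state qbmms;
+ static struct lfds710_queue_bmm_element element_array[8];
+ void *key, *value;
+
+ lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, element_array, 8, NULL );
+
+ // enqueue returns 1 on success and 0 when the queue is full
+ lfds710_queue_bmm_enqueue( &qbmms, (void *) 1, (void *) 100 );
+
+ // dequeue returns 1 on success and 0 when the queue is empty
+ while( lfds710_queue_bmm_dequeue(&qbmms, &key, &value) )
+ ;
+}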
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_manyproducer_manyconsumer_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_queue_bmm_internal_validate( struct lfds710_queue_bmm_state *qbmms,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_bmm_query( struct lfds710_queue_bmm_state *qbmms,
+ enum lfds710_queue_bmm_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( qbmms != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS710_QUEUE_BMM_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ lfds710_pal_uint_t
+ local_read_index,
+ local_write_index;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ local_read_index = qbmms->read_index;
+ local_write_index = qbmms->write_index;
+
+ *(lfds710_pal_uint_t *) query_output = +( local_write_index - local_read_index );
+
+ if( local_read_index > local_write_index )
+ *(lfds710_pal_uint_t *) query_output = ((lfds710_pal_uint_t) -1) - *(lfds710_pal_uint_t *) query_output;
+ }
+ break;
+
+ case LFDS710_QUEUE_BMM_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_queue_bmm_internal_validate( qbmms, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_queue_bmm_internal_validate( struct lfds710_queue_bmm_state *qbmms,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_validity )
+{
+ lfds710_pal_uint_t
+ expected_sequence_number,
+ loop,
+ number_elements,
+ sequence_number;
+
+ LFDS710_PAL_ASSERT( qbmms != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_validity != NULL );
+
+ *lfds710_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ /* TRD : starting from the read_index, we should find number_elements of incrementing sequence numbers
+ we then perform a second scan from the write_index onwards, which should have (max elements in queue - number_elements) incrementing sequence numbers
+ */
+
+ lfds710_queue_bmm_query( qbmms, LFDS710_QUEUE_BMM_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+
+ expected_sequence_number = qbmms->element_array[ qbmms->read_index & qbmms->mask ].sequence_number;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ sequence_number = qbmms->element_array[ (qbmms->read_index + loop) & qbmms->mask ].sequence_number;
+
+ if( sequence_number != expected_sequence_number )
+ *lfds710_validity = LFDS710_MISC_VALIDITY_INVALID_ORDER;
+
+ if( sequence_number == expected_sequence_number )
+ expected_sequence_number = sequence_number + 1;
+ }
+
+ // TRD : now the write_index onwards
+
+ expected_sequence_number = qbmms->element_array[ qbmms->write_index & qbmms->mask ].sequence_number;
+
+ for( loop = 0 ; loop < qbmms->number_elements - number_elements ; loop++ )
+ {
+ sequence_number = qbmms->element_array[ (qbmms->write_index + loop) & qbmms->mask ].sequence_number;
+
+ if( sequence_number != expected_sequence_number )
+ *lfds710_validity = LFDS710_MISC_VALIDITY_INVALID_ORDER;
+
+ if( sequence_number == expected_sequence_number )
+ expected_sequence_number = sequence_number + 1;
+ }
+
+ // TRD : now check against the expected number of elements
+
+ if( *lfds710_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_pal_uint_t
+ number_elements;
+
+ lfds710_queue_bmm_query( qbmms, LFDS710_QUEUE_BMM_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_singleproducer_singleconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_bss_cleanup( struct lfds710_queue_bss_state *qbsss,
+ void (*element_cleanup_callback)(struct lfds710_queue_bss_state *qbsss, void *key, void *value) )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct lfds710_queue_bss_element
+ *qbsse;
+
+ LFDS710_PAL_ASSERT( qbsss != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ for( loop = qbsss->read_index ; loop < qbsss->read_index + qbsss->number_elements ; loop++ )
+ {
+ qbsse = qbsss->element_array + (loop % qbsss->number_elements);
+ element_cleanup_callback( qbsss, qbsse->key, qbsse->value );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_singleproducer_singleconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_queue_bss_dequeue( struct lfds710_queue_bss_state *qbsss,
+ void **key,
+ void **value )
+{
+ struct lfds710_queue_bss_element
+ *qbsse;
+
+ LFDS710_PAL_ASSERT( qbsss != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( qbsss->read_index != qbsss->write_index )
+ {
+ qbsse = qbsss->element_array + qbsss->read_index;
+
+ if( key != NULL )
+ *key = qbsse->key;
+
+ if( value != NULL )
+ *value = qbsse->value;
+
+ qbsss->read_index = (qbsss->read_index + 1) & qbsss->mask;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ return 1;
+ }
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_singleproducer_singleconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_queue_bss_enqueue( struct lfds710_queue_bss_state *qbsss,
+ void *key,
+ void *value )
+{
+ struct lfds710_queue_bss_element
+ *qbsse;
+
+ LFDS710_PAL_ASSERT( qbsss != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( ( (qbsss->write_index+1) & qbsss->mask ) != qbsss->read_index )
+ {
+ qbsse = qbsss->element_array + qbsss->write_index;
+
+ qbsse->key = key;
+ qbsse->value = value;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ qbsss->write_index = (qbsss->write_index + 1) & qbsss->mask;
+
+ return 1;
+ }
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_singleproducer_singleconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_bss_init_valid_on_current_logical_core( struct lfds710_queue_bss_state *qbsss,
+ struct lfds710_queue_bss_element *element_array,
+ lfds710_pal_uint_t number_elements,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( qbsss != NULL );
+ LFDS710_PAL_ASSERT( element_array != NULL );
+ LFDS710_PAL_ASSERT( number_elements >= 2 );
+ LFDS710_PAL_ASSERT( ( number_elements & (number_elements-1) ) == 0 ); // TRD : number_elements must be a positive integer power of 2
+ // TRD : user_state can be NULL
+
+ /* TRD : the use of mask and the restriction of the number of elements
+ to a power of two bears some remark
+
+ in this queue, there are a fixed number of elements
+ we have a read index and a write index
+ when we write, and there is space to write, we increment the write index
+ (if there is no space to write, we just return)
+ when we read, and there are elements to be read, we increment the read index after reading
+ (if there are no elements to read, we just return)
+ the problem is - how do we handle wrap around?
+ e.g. when I write, but my write index is now equal to the number of elements
+ the usual solution is to take the write index modulo the number of elements
+ the problem is that modulus is slow
+ there is a better way
+ first, we restrict the number of elements to be a power of two
+ so imagine we set the number of elements to 2^15
+ this gives us a bit pattern of 1000 0000 0000 0000 (a single set bit, then zeros)
+ now subtract one from this
+ this gives us a mask
+ 0111 1111 1111 1111 (all the bits below the top bit set)
+ so what we do now, when we increment an index (think of the write index as the example)
+ is bitwise AND it with the mask
+ now think about what happens
+ all the values below 2^15 are unchanged - their top bit is never set, so the AND leaves them alone
+ but when we finally hit 2^15 and need to roll over... bingo!
+ the AND drops the top bit and we have the value 0!
+ this is exactly what we want
+ bitwise AND is much faster than modulus
+ */
+
+ qbsss->number_elements = number_elements;
+ qbsss->mask = qbsss->number_elements - 1;
+ qbsss->read_index = 0;
+ qbsss->write_index = 0;
+ qbsss->element_array = element_array;
+ qbsss->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
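+
+/****************************************************************************/
+/* worked example of the mask arithmetic described above, with illustrative
+ numbers : take number_elements = 16, so mask = 15
+
+ number_elements = 16 -> 0001 0000
+ mask = 15 -> 0000 1111
+
+ index : 13 14 15 16 17
+ index & mask : 13 14 15 0 1
+
+ so (index + 1) & mask wraps from 15 back to 0 with a single AND, matching
+ (index + 1) % number_elements without the cost of a division
+*/
+
+/* usage sketch : a minimal illustration of the bounded single-producer
+ single-consumer queue; the element count and the key/value integers are
+ illustrative only - in real use the enqueue side and the dequeue side
+ would be two different threads
+*/
+
+static void example_queue_bss_usage( void )
+{
+ static struct lfds710_queue_bss_state qbsss;
+ static struct lfds710_queue_bss_element element_array[16];
+ void *key, *value;
+
+ lfds710_queue_bss_init_valid_on_current_logical_core( &qbsss, element_array, 16, NULL );
+
+ // enqueue returns 1 on success and 0 when the queue is full
+ lfds710_queue_bss_enqueue( &qbsss, (void *) 1, (void *) 100 );
+
+ // dequeue returns 1 on success and 0 when the queue is empty
+ while( lfds710_queue_bss_dequeue(&qbsss, &key, &value) )
+ ;
+}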
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_bounded_singleproducer_singleconsumer_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_queue_bss_internal_validate( struct lfds710_queue_bss_state *qbsss,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_bss_query( struct lfds710_queue_bss_state *qbsss,
+ enum lfds710_queue_bss_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( qbsss != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS710_QUEUE_BSS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT:
+ {
+ lfds710_pal_uint_t
+ local_read_index,
+ local_write_index;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ local_read_index = qbsss->read_index;
+ local_write_index = qbsss->write_index;
+
+ *(lfds710_pal_uint_t *) query_output = +( local_write_index - local_read_index );
+
+ if( local_read_index > local_write_index )
+ *(lfds710_pal_uint_t *) query_output = qbsss->number_elements - *(lfds710_pal_uint_t *) query_output;
+ }
+ break;
+
+ case LFDS710_QUEUE_BSS_QUERY_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_queue_bss_internal_validate( qbsss, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_queue_bss_internal_validate( struct lfds710_queue_bss_state *qbsss,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_validity )
+{
+ LFDS710_PAL_ASSERT( qbsss != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_validity != NULL );
+
+ *lfds710_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ if( vi != NULL )
+ {
+ lfds710_pal_uint_t
+ number_elements;
+
+ lfds710_queue_bss_query( qbsss, LFDS710_QUEUE_BSS_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_unbounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_umm_cleanup( struct lfds710_queue_umm_state *qumms,
+ void (*element_cleanup_callback)(struct lfds710_queue_umm_state *qumms, struct lfds710_queue_umm_element *qumme, enum lfds710_misc_flag dummy_element_flag) )
+{
+ struct lfds710_queue_umm_element
+ *qumme;
+
+ void
+ *value;
+
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ {
+ while( qumms->dequeue[POINTER] != qumms->enqueue[POINTER] )
+ {
+ // TRD : trailing dummy element, so the first real value is in the next element
+ value = qumms->dequeue[POINTER]->next[POINTER]->value;
+
+ // TRD : user is given back *an* element, but not the one his user data was in
+ qumme = qumms->dequeue[POINTER];
+
+ // TRD : remove the element from queue
+ qumms->dequeue[POINTER] = qumms->dequeue[POINTER]->next[POINTER];
+
+ // TRD : write value into the qumme we're going to give the user
+ qumme->value = value;
+
+ element_cleanup_callback( qumms, qumme, LFDS710_MISC_FLAG_LOWERED );
+ }
+
+ // TRD : and now the final element
+ element_cleanup_callback( qumms, (struct lfds710_queue_umm_element *) qumms->dequeue[POINTER], LFDS710_MISC_FLAG_RAISED );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_unbounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_queue_umm_dequeue( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element **qumme )
+{
+ char unsigned
+ result = 0;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ enum lfds710_queue_umm_queue_state
+ state = LFDS710_QUEUE_UMM_QUEUE_STATE_UNKNOWN;
+
+ int
+ rv = 1;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *dequeue[PAC_SIZE],
+ *enqueue[PAC_SIZE],
+ *next[PAC_SIZE];
+
+ void
+ *key = NULL,
+ *value = NULL;
+
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ LFDS710_PAL_ASSERT( qumme != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ do
+ {
+ /* TRD : note here the deviation from the white paper
+ in the white paper, next is loaded from dequeue, not from qumms->dequeue
+ what concerns me is that between the load of dequeue and the load of
+ dequeue->next, the element can be dequeued by another thread *and freed*
+
+ by ordering the loads (load barriers), and loading both from qumms,
+ the following if(), which checks dequeue is still the same as qumms->dequeue,
+ still continues to ensure next belongs to dequeue, while avoiding the
+ problem with free
+ */
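+
+ /* a hedged reading of the code (not from the white paper) : dequeue, enqueue and next are
+ each a {pointer, counter} pair; every successful DWCAS installs a counter one greater than
+ the one it read, so a pointer which has been freed and recycled (the ABA problem) still
+ fails the compare, because its counter no longer matches; conceptually :
+
+ struct versioned_pointer
+ {
+ struct lfds710_queue_umm_element *pointer; // what the code keeps in [POINTER]
+ lfds710_pal_uint_t counter; // what the code keeps in [COUNTER]
+ };
+
+ (the real code packs both into one double-pointer-aligned array so DWCAS can swap them together)
+ */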
+
+ dequeue[COUNTER] = qumms->dequeue[COUNTER];
+ dequeue[POINTER] = qumms->dequeue[POINTER];
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ enqueue[COUNTER] = qumms->enqueue[COUNTER];
+ enqueue[POINTER] = qumms->enqueue[POINTER];
+
+ next[COUNTER] = qumms->dequeue[POINTER]->next[COUNTER];
+ next[POINTER] = qumms->dequeue[POINTER]->next[POINTER];
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( qumms->dequeue[COUNTER] == dequeue[COUNTER] and qumms->dequeue[POINTER] == dequeue[POINTER] )
+ {
+ if( enqueue[POINTER] == dequeue[POINTER] and next[POINTER] == NULL )
+ state = LFDS710_QUEUE_UMM_QUEUE_STATE_EMPTY;
+
+ if( enqueue[POINTER] == dequeue[POINTER] and next[POINTER] != NULL )
+ state = LFDS710_QUEUE_UMM_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE;
+
+ if( enqueue[POINTER] != dequeue[POINTER] )
+ state = LFDS710_QUEUE_UMM_QUEUE_STATE_ATTEMPT_DEQUEUE;
+
+ switch( state )
+ {
+ case LFDS710_QUEUE_UMM_QUEUE_STATE_UNKNOWN:
+ // TRD : eliminates compiler warning
+ break;
+
+ case LFDS710_QUEUE_UMM_QUEUE_STATE_EMPTY:
+ rv = 0;
+ *qumme = NULL;
+ result = 1;
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ break;
+
+ case LFDS710_QUEUE_UMM_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE:
+ next[COUNTER] = enqueue[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( qumms->enqueue, enqueue, next, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ // TRD : in fact if result is 1 (successful) I think we can now simply drop down into the dequeue attempt
+ break;
+
+ case LFDS710_QUEUE_UMM_QUEUE_STATE_ATTEMPT_DEQUEUE:
+ key = next[POINTER]->key;
+ value = next[POINTER]->value;
+
+ next[COUNTER] = dequeue[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( qumms->dequeue, dequeue, next, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 1 )
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ break;
+ }
+ }
+ else
+ result = 0;
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( qumms->dequeue_backoff, backoff_iteration );
+ }
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED );
+
+ if( result == 1 )
+ {
+ *qumme = dequeue[POINTER];
+ (*qumme)->key = key;
+ (*qumme)->value = value;
+ }
+
+ LFDS710_BACKOFF_AUTOTUNE( qumms->dequeue_backoff, backoff_iteration );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_unbounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_umm_enqueue( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element *qumme )
+{
+ char unsigned
+ result = 0;
+
+ enum lfds710_misc_flag
+ finished_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *volatile enqueue[PAC_SIZE],
+ *new_enqueue[PAC_SIZE],
+ *volatile next[PAC_SIZE];
+
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ LFDS710_PAL_ASSERT( qumme != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) qumme->next % LFDS710_PAL_ALIGN_DOUBLE_POINTER == 0 );
+
+ qumme->next[POINTER] = NULL;
+ LFDS710_PAL_ATOMIC_ADD( &qumms->aba_counter, 1, qumme->next[COUNTER], struct lfds710_queue_umm_element * );
+ LFDS710_MISC_BARRIER_STORE;
+
+ new_enqueue[POINTER] = qumme;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ do
+ {
+ /* TRD : note here the deviation from the white paper
+ in the white paper, next is loaded from enqueue, not from qumms->enqueue
+ what concerns me is that between the load of enqueue and the load of
+ enqueue->next, the element can be dequeued by another thread *and freed*
+
+ by ordering the loads (load barriers), and loading both from qumms,
+ the following if(), which checks enqueue is still the same as qumms->enqueue
+ still continues to ensure next belongs to enqueue, while avoiding the
+ problem with free
+ */
+
+ enqueue[COUNTER] = qumms->enqueue[COUNTER];
+ enqueue[POINTER] = qumms->enqueue[POINTER];
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ next[COUNTER] = qumms->enqueue[POINTER]->next[COUNTER];
+ next[POINTER] = qumms->enqueue[POINTER]->next[POINTER];
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( qumms->enqueue[COUNTER] == enqueue[COUNTER] and qumms->enqueue[POINTER] == enqueue[POINTER] )
+ {
+ if( next[POINTER] == NULL )
+ {
+ new_enqueue[COUNTER] = next[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( enqueue[POINTER]->next, next, new_enqueue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ if( result == 1 )
+ finished_flag = LFDS710_MISC_FLAG_RAISED;
+ }
+ else
+ {
+ next[COUNTER] = enqueue[COUNTER] + 1;
+ // TRD : strictly, this is a weak CAS, but we do an extra iteration of the main loop on a fake failure, so we set it to be strong
+ LFDS710_PAL_ATOMIC_DWCAS( qumms->enqueue, enqueue, next, LFDS710_MISC_CAS_STRENGTH_STRONG, result );
+ }
+ }
+ else
+ result = 0;
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( qumms->enqueue_backoff, backoff_iteration );
+ }
+ while( finished_flag == LFDS710_MISC_FLAG_LOWERED );
+
+ // TRD : move enqueue along; only a weak CAS as the dequeue will solve this if it's out of place
+ new_enqueue[COUNTER] = enqueue[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( qumms->enqueue, enqueue, new_enqueue, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( qumms->enqueue_backoff, backoff_iteration );
+
+ LFDS710_BACKOFF_AUTOTUNE( qumms->enqueue_backoff, backoff_iteration );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_unbounded_manyproducer_manyconsumer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_umm_init_valid_on_current_logical_core( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element *qumme_dummy,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &qumms->enqueue % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &qumms->dequeue % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &qumms->user_state % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( qumme_dummy != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) qumme_dummy->next % LFDS710_PAL_ALIGN_DOUBLE_POINTER == 0 );
+ // TRD : user_state can be NULL
+
+ /* TRD : qumme_dummy is a dummy element, needed for init
+ the qumms->enqueue and qumms->dequeue counters do not need to be initialized
+ but it does no harm to do so, and stops a valgrind complaint
+ */
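+
+ /* an illustrative sketch of the dummy element (my summary, not from the original comments) :
+
+ empty queue : dequeue -> [dummy] <- enqueue (dummy->next == NULL)
+ after one enqueue : dequeue -> [dummy] -> [element A] <- enqueue
+
+ a dequeue takes A's value, swings the dequeue pointer to A, and hands the *old dummy*
+ back to the caller with that value copied into it; element A then serves as the new dummy
+ (see also the comments in lfds710_queue_umm_cleanup)
+ */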
+
+ LFDS710_PRNG_GENERATE( lfds710_misc_globals.ps, qumms->aba_counter );
+
+ qumms->enqueue[POINTER] = qumme_dummy;
+ qumms->enqueue[COUNTER] = (struct lfds710_queue_umm_element *) 0;
+ qumms->dequeue[POINTER] = qumme_dummy;
+ qumms->dequeue[COUNTER] = (struct lfds710_queue_umm_element *) 0;
+
+ qumme_dummy->next[POINTER] = NULL;
+ // TRD : no need here for an atomic add as we have a store barrier and force store below
+ qumme_dummy->next[COUNTER] = (struct lfds710_queue_umm_element *) qumms->aba_counter++;
+ qumme_dummy->key = NULL;
+ qumme_dummy->value = NULL;
+
+ qumms->user_state = user_state;
+
+ lfds710_misc_internal_backoff_init( &qumms->dequeue_backoff );
+ lfds710_misc_internal_backoff_init( &qumms->enqueue_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** enums *****/
+enum lfds710_queue_umm_queue_state
+{
+ LFDS710_QUEUE_UMM_QUEUE_STATE_UNKNOWN,
+ LFDS710_QUEUE_UMM_QUEUE_STATE_EMPTY,
+ LFDS710_QUEUE_UMM_QUEUE_STATE_ENQUEUE_OUT_OF_PLACE,
+ LFDS710_QUEUE_UMM_QUEUE_STATE_ATTEMPT_DEQUEUE
+};
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_queue_unbounded_manyproducer_manyconsumer_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_queue_umm_internal_validate( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_queue_umm_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_queue_umm_query( struct lfds710_queue_umm_state *qumms,
+ enum lfds710_queue_umm_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ struct lfds710_queue_umm_element
+ *qumme;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ qumme = (struct lfds710_queue_umm_element *) qumms->dequeue[POINTER];
+
+ while( qumme != NULL )
+ {
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ qumme = (struct lfds710_queue_umm_element *) qumme->next[POINTER];
+ }
+
+ // TRD : remember there is a dummy element in the queue
+ ( *(lfds710_pal_uint_t *) query_output )--;
+ break;
+
+ case LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_queue_umm_internal_validate( qumms, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_queue_umm_internal_validate( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_queue_umm_validity )
+{
+ lfds710_pal_uint_t
+ number_elements = 0;
+
+ struct lfds710_queue_umm_element
+ *qumme_fast,
+ *qumme_slow;
+
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_queue_umm_validity != NULL );
+
+ *lfds710_queue_umm_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ qumme_slow = qumme_fast = (struct lfds710_queue_umm_element *) qumms->dequeue[POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the dequeue end of the queue
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the queue)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
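+
+ /* a minimal standalone sketch of the same slow/fast check (illustrative only, assuming a
+ hypothetical "struct node { struct node *next; }" rather than the library's element type) :
+
+ int has_loop( struct node *head )
+ {
+ struct node *slow = head, *fast = head;
+
+ while( fast != NULL && fast->next != NULL )
+ {
+ slow = slow->next; // advance one pointer by one element
+ fast = fast->next->next; // and the other by two
+ if( slow == fast )
+ return 1; // the fast pointer has 'seen' the slow pointer - there is a loop
+ }
+
+ return 0; // reached the end of the list - no loop
+ }
+ */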
+
+ if( qumme_slow != NULL )
+ do
+ {
+ qumme_slow = qumme_slow->next[POINTER];
+
+ if( qumme_fast != NULL )
+ qumme_fast = qumme_fast->next[POINTER];
+
+ if( qumme_fast != NULL )
+ qumme_fast = qumme_fast->next[POINTER];
+ }
+ while( qumme_slow != NULL and qumme_fast != qumme_slow );
+
+ if( qumme_fast != NULL and qumme_slow != NULL and qumme_fast == qumme_slow )
+ *lfds710_queue_umm_validity = LFDS710_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds710_queue_umm_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_queue_umm_query( qumms, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_queue_umm_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_queue_umm_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_ringbuffer_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_ringbuffer_internal_queue_umm_element_cleanup_callback( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element *qumme,
+ enum lfds710_misc_flag dummy_element_flag );
+static void lfds710_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element *fe );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_ringbuffer_cleanup( struct lfds710_ringbuffer_state *rs,
+ void (*element_cleanup_callback)(struct lfds710_ringbuffer_state *rs, void *key, void *value, enum lfds710_misc_flag unread_flag) )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ rs->element_cleanup_callback = element_cleanup_callback;
+ lfds710_queue_umm_cleanup( &rs->qumms, lfds710_ringbuffer_internal_queue_umm_element_cleanup_callback );
+ lfds710_freelist_cleanup( &rs->fs, lfds710_ringbuffer_internal_freelist_element_cleanup_callback );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void lfds710_ringbuffer_internal_queue_umm_element_cleanup_callback( struct lfds710_queue_umm_state *qumms,
+ struct lfds710_queue_umm_element *qumme,
+ enum lfds710_misc_flag dummy_element_flag )
+{
+ struct lfds710_ringbuffer_element
+ *re;
+
+ struct lfds710_ringbuffer_state
+ *rs;
+
+ LFDS710_PAL_ASSERT( qumms != NULL );
+ LFDS710_PAL_ASSERT( qumme != NULL );
+ // TRD : dummy_element can be any value in its range
+
+ rs = (struct lfds710_ringbuffer_state *) LFDS710_QUEUE_UMM_GET_USER_STATE_FROM_STATE( *qumms );
+ re = (struct lfds710_ringbuffer_element *) LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qumme );
+
+ if( dummy_element_flag == LFDS710_MISC_FLAG_LOWERED )
+ rs->element_cleanup_callback( rs, re->key, re->value, LFDS710_MISC_FLAG_RAISED );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void lfds710_ringbuffer_internal_freelist_element_cleanup_callback( struct lfds710_freelist_state *fs,
+ struct lfds710_freelist_element *fe )
+{
+ struct lfds710_ringbuffer_element
+ *re;
+
+ struct lfds710_ringbuffer_state
+ *rs;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ rs = (struct lfds710_ringbuffer_state *) LFDS710_FREELIST_GET_USER_STATE_FROM_STATE( *fs );
+ re = (struct lfds710_ringbuffer_element *) LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+
+ rs->element_cleanup_callback( rs, re->key, re->value, LFDS710_MISC_FLAG_LOWERED );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_ringbuffer_init_valid_on_current_logical_core( struct lfds710_ringbuffer_state *rs,
+ struct lfds710_ringbuffer_element *re_array_inc_dummy,
+ lfds710_pal_uint_t number_elements_inc_dummy,
+ void *user_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( re_array_inc_dummy != NULL );
+ LFDS710_PAL_ASSERT( number_elements_inc_dummy >= 2 );
+ // TRD : user_state can be NULL
+
+ rs->user_state = user_state;
+
+ re_array_inc_dummy[0].qumme_use = &re_array_inc_dummy[0].qumme;
+
+ lfds710_freelist_init_valid_on_current_logical_core( &rs->fs, NULL, 0, rs );
+ lfds710_queue_umm_init_valid_on_current_logical_core( &rs->qumms, &re_array_inc_dummy[0].qumme, rs );
+
+ for( loop = 1 ; loop < number_elements_inc_dummy ; loop++ )
+ {
+ re_array_inc_dummy[loop].qumme_use = &re_array_inc_dummy[loop].qumme;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( re_array_inc_dummy[loop].fe, &re_array_inc_dummy[loop] );
+ lfds710_freelist_push( &rs->fs, &re_array_inc_dummy[loop].fe, NULL );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_ringbuffer_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_ringbuffer_internal_validate( struct lfds710_ringbuffer_state *rs,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_queue_umm_validity,
+ enum lfds710_misc_validity *lfds710_freelist_validity );
+
+
+
+/****************************************************************************/
+void lfds710_ringbuffer_query( struct lfds710_ringbuffer_state *rs,
+ enum lfds710_ringbuffer_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_queue_umm_query( &rs->qumms, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_GET_COUNT, NULL, query_output );
+ break;
+
+ case LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_ringbuffer_internal_validate( rs, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output, ((enum lfds710_misc_validity *) query_output)+1 );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_ringbuffer_internal_validate( struct lfds710_ringbuffer_state *rs,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_queue_umm_validity,
+ enum lfds710_misc_validity *lfds710_freelist_validity )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_queue_umm_validity != NULL );
+ LFDS710_PAL_ASSERT( lfds710_freelist_validity != NULL );
+
+ if( vi == NULL )
+ {
+ lfds710_queue_umm_query( &rs->qumms, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, NULL, lfds710_queue_umm_validity );
+ lfds710_freelist_query( &rs->fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, NULL, lfds710_freelist_validity );
+ }
+
+ if( vi != NULL )
+ {
+ struct lfds710_misc_validation_info
+ freelist_vi,
+ queue_vi;
+
+ queue_vi.min_elements = 0;
+ freelist_vi.min_elements = 0;
+ queue_vi.max_elements = vi->max_elements;
+ freelist_vi.max_elements = vi->max_elements;
+
+ lfds710_queue_umm_query( &rs->qumms, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, &queue_vi, lfds710_queue_umm_validity );
+ lfds710_freelist_query( &rs->fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &freelist_vi, lfds710_freelist_validity );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_ringbuffer_read( struct lfds710_ringbuffer_state *rs,
+ void **key,
+ void **value )
+{
+ int
+ rv;
+
+ struct lfds710_queue_umm_element
+ *qumme;
+
+ struct lfds710_ringbuffer_element
+ *re;
+
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+
+ rv = lfds710_queue_umm_dequeue( &rs->qumms, &qumme );
+
+ if( rv == 1 )
+ {
+ re = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qumme );
+ re->qumme_use = (struct lfds710_queue_umm_element *) qumme;
+ if( key != NULL )
+ *key = re->key;
+ if( value != NULL )
+ *value = re->value;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( re->fe, re );
+ lfds710_freelist_push( &rs->fs, &re->fe, NULL );
+ }
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_ringbuffer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_ringbuffer_write( struct lfds710_ringbuffer_state *rs,
+ void *key,
+ void *value,
+ enum lfds710_misc_flag *overwrite_occurred_flag,
+ void **overwritten_key,
+ void **overwritten_value )
+{
+ int
+ rv = 0;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct lfds710_queue_umm_element
+ *qumme;
+
+ struct lfds710_ringbuffer_element
+ *re = NULL;
+
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : key can be NULL
+ // TRD : value can be NULL
+ // TRD : overwrite_occurred_flag can be NULL
+ // TRD : overwritten_key can be NULL
+ // TRD : overwritten_value can be NULL
+
+ if( overwrite_occurred_flag != NULL )
+ *overwrite_occurred_flag = LFDS710_MISC_FLAG_LOWERED;
+
+ do
+ {
+ rv = lfds710_freelist_pop( &rs->fs, &fe, NULL );
+
+ if( rv == 1 )
+ re = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+
+ if( rv == 0 )
+ {
+ // TRD : the queue can return empty as well - remember, we're lock-free; anything could have happened since the previous instruction
+ rv = lfds710_queue_umm_dequeue( &rs->qumms, &qumme );
+
+ if( rv == 1 )
+ {
+ re = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qumme );
+ re->qumme_use = (struct lfds710_queue_umm_element *) qumme;
+
+ if( overwrite_occurred_flag != NULL )
+ *overwrite_occurred_flag = LFDS710_MISC_FLAG_RAISED;
+
+ if( overwritten_key != NULL )
+ *overwritten_key = re->key;
+
+ if( overwritten_value != NULL )
+ *overwritten_value = re->value;
+ }
+ }
+ }
+ while( rv == 0 );
+
+ re->key = key;
+ re->value = value;
+
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( *re->qumme_use, re );
+ lfds710_queue_umm_enqueue( &rs->qumms, re->qumme_use );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_stack_cleanup( struct lfds710_stack_state *ss,
+ void (*element_cleanup_callback)(struct lfds710_stack_state *ss, struct lfds710_stack_element *se) )
+{
+ struct lfds710_stack_element
+ *se,
+ *se_temp;
+
+ LFDS710_PAL_ASSERT( ss != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ {
+ se = ss->top[POINTER];
+
+ while( se != NULL )
+ {
+ se_temp = se;
+ se = se->next;
+
+ element_cleanup_callback( ss, se_temp );
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_stack_init_valid_on_current_logical_core( struct lfds710_stack_state *ss,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( ss != NULL );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) ss->top % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ LFDS710_PAL_ASSERT( (lfds710_pal_uint_t) &ss->user_state % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES == 0 );
+ // TRD : user_state can be NULL
+
+ ss->top[POINTER] = NULL;
+ ss->top[COUNTER] = 0;
+
+ ss->user_state = user_state;
+
+ lfds710_misc_internal_backoff_init( &ss->pop_backoff );
+ lfds710_misc_internal_backoff_init( &ss->push_backoff );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../liblfds710_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int lfds710_stack_pop( struct lfds710_stack_state *ss,
+ struct lfds710_stack_element **se )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_stack_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS710_PAL_ASSERT( ss != NULL );
+ LFDS710_PAL_ASSERT( se != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ original_top[COUNTER] = ss->top[COUNTER];
+ original_top[POINTER] = ss->top[POINTER];
+
+ do
+ {
+ if( original_top[POINTER] == NULL )
+ {
+ *se = NULL;
+ return 0;
+ }
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ new_top[POINTER] = original_top[POINTER]->next;
+
+ LFDS710_PAL_ATOMIC_DWCAS( ss->top, original_top, new_top, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ {
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( ss->pop_backoff, backoff_iteration );
+ LFDS710_MISC_BARRIER_LOAD;
+ }
+ }
+ while( result == 0 );
+
+ *se = original_top[POINTER];
+
+ LFDS710_BACKOFF_AUTOTUNE( ss->pop_backoff, backoff_iteration );
+
+ return 1;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_stack_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void lfds710_stack_push( struct lfds710_stack_state *ss,
+ struct lfds710_stack_element *se )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ backoff_iteration = LFDS710_BACKOFF_INITIAL_VALUE;
+
+ struct lfds710_stack_element LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ *new_top[PAC_SIZE],
+ *volatile original_top[PAC_SIZE];
+
+ LFDS710_PAL_ASSERT( ss != NULL );
+ LFDS710_PAL_ASSERT( se != NULL );
+
+ new_top[POINTER] = se;
+
+ original_top[COUNTER] = ss->top[COUNTER];
+ original_top[POINTER] = ss->top[POINTER];
+
+ do
+ {
+ se->next = original_top[POINTER];
+ LFDS710_MISC_BARRIER_STORE;
+
+ new_top[COUNTER] = original_top[COUNTER] + 1;
+ LFDS710_PAL_ATOMIC_DWCAS( ss->top, original_top, new_top, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+
+ if( result == 0 )
+ LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( ss->push_backoff, backoff_iteration );
+ }
+ while( result == 0 );
+
+ LFDS710_BACKOFF_AUTOTUNE( ss->push_backoff, backoff_iteration );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "lfds710_stack_internal.h"
+
+/***** private prototypes *****/
+static void lfds710_stack_internal_stack_validate( struct lfds710_stack_state *ss,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_stack_validity );
+
+
+
+
+
+/****************************************************************************/
+void lfds710_stack_query( struct lfds710_stack_state *ss,
+ enum lfds710_stack_query query_type,
+ void *query_input,
+ void *query_output )
+{
+ struct lfds710_stack_element
+ *se;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( ss != NULL );
+ // TRD : query_type can be any value in its range
+
+ switch( query_type )
+ {
+ case LFDS710_STACK_QUERY_SINGLETHREADED_GET_COUNT:
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(lfds710_pal_uint_t *) query_output = 0;
+
+ se = (struct lfds710_stack_element *) ss->top[POINTER];
+
+ while( se != NULL )
+ {
+ ( *(lfds710_pal_uint_t *) query_output )++;
+ se = (struct lfds710_stack_element *) se->next;
+ }
+ break;
+
+ case LFDS710_STACK_QUERY_SINGLETHREADED_VALIDATE:
+ // TRD : query_input can be NULL
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ lfds710_stack_internal_stack_validate( ss, (struct lfds710_misc_validation_info *) query_input, (enum lfds710_misc_validity *) query_output );
+ break;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void lfds710_stack_internal_stack_validate( struct lfds710_stack_state *ss,
+ struct lfds710_misc_validation_info *vi,
+ enum lfds710_misc_validity *lfds710_stack_validity )
+{
+ lfds710_pal_uint_t
+ number_elements = 0;
+
+ struct lfds710_stack_element
+ *se_fast,
+ *se_slow;
+
+ LFDS710_PAL_ASSERT( ss != NULL );
+ // TRD : vi can be NULL
+ LFDS710_PAL_ASSERT( lfds710_stack_validity != NULL );
+
+ *lfds710_stack_validity = LFDS710_MISC_VALIDITY_VALID;
+
+ se_slow = se_fast = (struct lfds710_stack_element *) ss->top[POINTER];
+
+ /* TRD : first, check for a loop
+ we have two pointers
+ both of which start at the top of the stack
+ we enter a loop
+ and on each iteration
+ we advance one pointer by one element
+ and the other by two
+
+ we exit the loop when both pointers are NULL
+ (have reached the end of the stack)
+
+ or
+
+ if the fast pointer 'sees' the slow pointer
+ which means we have a loop
+ */
+
+ if( se_slow != NULL )
+ do
+ {
+ se_slow = se_slow->next;
+
+ if( se_fast != NULL )
+ se_fast = se_fast->next;
+
+ if( se_fast != NULL )
+ se_fast = se_fast->next;
+ }
+ while( se_slow != NULL and se_fast != se_slow );
+
+ if( se_fast != NULL and se_slow != NULL and se_fast == se_slow )
+ *lfds710_stack_validity = LFDS710_MISC_VALIDITY_INVALID_LOOP;
+
+ /* TRD : now check for expected number of elements
+ vi can be NULL, in which case we do not check
+ we know we don't have a loop from our earlier check
+ */
+
+ if( *lfds710_stack_validity == LFDS710_MISC_VALIDITY_VALID and vi != NULL )
+ {
+ lfds710_stack_query( ss, LFDS710_STACK_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements < vi->min_elements )
+ *lfds710_stack_validity = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( number_elements > vi->max_elements )
+ *lfds710_stack_validity = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** public prototypes *****/
+#include "../inc/liblfds710.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define NO_FLAGS 0x0
+
+#define LFDS710_VERSION_STRING "7.1.0"
+#define LFDS710_VERSION_INTEGER 710
+
+#if( defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && defined PROF )
+ #define BUILD_TYPE_STRING "profiling"
+#endif
+
+#define LFDS710_BACKOFF_INITIAL_VALUE 0
+#define LFDS710_BACKOFF_LIMIT 10
+
+#define LFDS710_BACKOFF_EXPONENTIAL_BACKOFF( backoff_state, backoff_iteration ) \
+{ \
+ lfds710_pal_uint_t volatile \
+ loop; \
+ \
+ lfds710_pal_uint_t \
+ endloop; \
+ \
+ if( (backoff_iteration) == LFDS710_BACKOFF_LIMIT ) \
+ (backoff_iteration) = LFDS710_BACKOFF_INITIAL_VALUE; \
+ else \
+ { \
+ endloop = ( ((lfds710_pal_uint_t) 0x1) << (backoff_iteration) ) * (backoff_state).metric; \
+ for( loop = 0 ; loop < endloop ; loop++ ); \
+ } \
+ \
+ (backoff_iteration)++; \
+}
+
+#define LFDS710_BACKOFF_AUTOTUNE( bs, backoff_iteration ) \
+{ \
+ if( (backoff_iteration) < 2 ) \
+ (bs).backoff_iteration_frequency_counters[(backoff_iteration)]++; \
+ \
+ if( ++(bs).total_operations >= 10000 and (bs).lock == LFDS710_MISC_FLAG_LOWERED ) \
+ { \
+ char unsigned \
+ result; \
+ \
+ lfds710_pal_uint_t LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES) \
+ compare = LFDS710_MISC_FLAG_LOWERED; \
+ \
+ LFDS710_PAL_ATOMIC_CAS( &(bs).lock, &compare, LFDS710_MISC_FLAG_RAISED, LFDS710_MISC_CAS_STRENGTH_WEAK, result ); \
+ \
+ if( result == 1 ) \
+ { \
+ /* TRD : if E[1] is less than 1/100th of E[0], decrease the metric, to increase E[1] */ \
+ if( (bs).backoff_iteration_frequency_counters[1] < (bs).backoff_iteration_frequency_counters[0] / 100 ) \
+ { \
+ if( (bs).metric >= 11 ) \
+ (bs).metric -= 10; \
+ } \
+ else \
+ (bs).metric += 10; \
+ \
+ (bs).backoff_iteration_frequency_counters[0] = 0; \
+ (bs).backoff_iteration_frequency_counters[1] = 0; \
+ (bs).total_operations = 0; \
+ \
+ LFDS710_MISC_BARRIER_STORE; \
+ \
+ LFDS710_PAL_ATOMIC_SET( &(bs).lock, LFDS710_MISC_FLAG_LOWERED ); \
+ } \
+ } \
+}
+
+/***** library-wide prototypes *****/
+void lfds710_misc_internal_backoff_init( struct lfds710_misc_backoff_state *bs );
+
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+OBJDIR := ../../obj
+SRCDIR := ../../src
+LIBINCDIRS := ../../../../../liblfds7.0.0/liblfds700/inc/ ../../../../liblfds710/inc/ ../../../libbenchmark/inc/ ../../../libshared/inc/
+LIBBINDIRS := ../../../../../liblfds7.0.0/liblfds700/bin/ ../../../../liblfds710/bin/ ../../../libbenchmark/bin/ ../../../libshared/bin/
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := benchmark
+BINARY := $(BINDIR)/$(BINNAME)
+SRCDIRS := .
+SOURCES := main.c misc.c util_cmdline.c porting_abstraction_layer_numa_free.c porting_abstraction_layer_numa_malloc.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS := -lm -lpthread -lrt
+USRLIBS := -lbenchmark -lshared -llfds710 -llfds700
+LIBINCDIRS := $(subst $(SPACE), -I,$(LIBINCDIRS))
+LIBBINDIRS := $(subst $(SPACE), -L,$(LIBBINDIRS))
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL :=
+
+CC := gcc
+CFLAGS_MANDATORY := -c -pthread -I$(LIBINCDIRS)
+CFLAGS_OPTIONAL := -Wall -Werror -Wno-unknown-pragmas
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+LD := gcc
+LDFLAGS_MANDATORY := -pthread -L$(LIBBINDIRS)
+LDFLAGS_OPTIONAL := -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -pie
+
+##### build variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) -o $(BINARY) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) $(USRLIBS) $(SYSLIBS)
+ chmod +x $(BINARY)
+
+##### phony #####
+.PHONY : clean cov dbg prof rel tsan vanilla
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d $(OBJDIR)/*.gcda $(OBJDIR)/*.gcno
+
+cov : $(BINARY)
+dbg : $(BINARY)
+prof : $(BINARY)
+rel : $(BINARY)
+tsan : $(BINARY)
+vanilla : $(BINARY)
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+OBJDIR := ../../obj
+SRCDIR := ../../src
+LIBINCDIRS := ../../../../../liblfds7.0.0/liblfds700/inc/ ../../../../liblfds710/inc/ ../../../libbenchmark/inc/ ../../../libshared/inc/
+LIBBINDIRS := ../../../../../liblfds7.0.0/liblfds700/bin/ ../../../../liblfds710/bin/ ../../../libbenchmark/bin/ ../../../libshared/bin/
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := benchmark
+BINARY := $(BINDIR)/$(BINNAME)
+SRCDIRS := .
+SOURCES := main.c misc.c util_cmdline.c porting_abstraction_layer_numa_free.c porting_abstraction_layer_numa_malloc.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS := -lm -lnuma -lpthread -lrt
+USRLIBS := -lbenchmark -lshared -llfds710 -llfds700
+LIBINCDIRS := $(subst $(SPACE), -I,$(LIBINCDIRS))
+LIBBINDIRS := $(subst $(SPACE), -L,$(LIBBINDIRS))
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL :=
+
+CC := gcc
+CFLAGS_MANDATORY := -c -pthread -I$(LIBINCDIRS) -DLIBNUMA
+CFLAGS_OPTIONAL := -Wall -Werror -Wno-unknown-pragmas
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+LD := gcc
+LDFLAGS_MANDATORY := -pthread -L$(LIBBINDIRS)
+LDFLAGS_OPTIONAL := -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -pie
+
+##### build variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) -o $(BINARY) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) $(USRLIBS) $(SYSLIBS)
+ chmod +x $(BINARY)
+
+##### phony #####
+.PHONY : clean cov dbg prof rel tsan vanilla
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d $(OBJDIR)/*.gcda $(OBJDIR)/*.gcno
+
+cov : $(BINARY)
+dbg : $(BINARY)
+prof : $(BINARY)
+rel : $(BINARY)
+tsan : $(BINARY)
+vanilla : $(BINARY)
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+
+##### sources, objects and libraries #####
+BINNAME := benchmark
+BINARY := $(BINDIR)/$(BINNAME).exe
+SRCDIRS := .
+SOURCES := main.c misc.c util_cmdline.c porting_abstraction_layer_numa_free.c porting_abstraction_layer_numa_malloc.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
+EXTLIBS :=
+USRLIBS := ../../../../liblfds710/bin/liblfds710.lib ../../../../../liblfds7.0.0/liblfds700/bin/liblfds700.lib ../../../../test_and_benchmark/libshared/bin/libshared.lib ../../../../test_and_benchmark/libbenchmark/bin/libbenchmark.lib
+
+##### tools #####
+CC := cl
+CFLAGS_MANDATORY :=
+CFLAGS_OPTIONAL := /nologo /W4 /WX /c "/Fd$(BINDIR)\$(BINNAME).pdb" /DUNICODE /D_UNICODE /D_CRT_SECURE_NO_DEPRECATE /DWIN32_LEAN_AND_MEAN
+CFLAGS_MANDATORY_DBG := /Od /Gm /Zi /D_DEBUG
+CFLAGS_MANDATORY_REL := /Ox /DNDEBUG
+
+LD := link
+LDFLAGS_MANDATORY := /nodefaultlib /subsystem:console
+LDFLAGS_OPTIONAL := /nologo /nxcompat /wx
+LDFLAGS_MANDATORY_DBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+LDFLAGS_MANDATORY_REL := /incremental:no
+
+##### build variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to lib debug
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MT
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MDd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := msvcrtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MD
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := msvcrt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) "/Fo$@" $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(CLIB) $(SYSLIBS) $(EXTLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(OBJDIR)\*.obj $(OBJDIR)\*.res $(BINDIR)\$(BINNAME).* $(QUIETLY)
+
+dlldbg : $(BINARY)
+dllrel : $(BINARY)
+libdbg : $(BINARY)
+librel : $(BINARY)
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+/***** includes *****/
+#include "porting_abstraction_layer.h"
+#include "porting_abstraction_layer_operating_system.h"
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "../../../liblfds710/inc/liblfds710.h"
+#include "../../libbenchmark/inc/libbenchmark.h"
+#include "util_cmdline.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define BITS_PER_BYTE 8
+
+#define NO_FLAGS 0x0
+
+#define BENCHMARK_VERSION_STRING "7.1.0"
+#define BENCHMARK_VERSION_INTEGER 710
+
+#if( defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && defined PROF )
+ #define BUILD_TYPE_STRING "profiling"
+#endif
+
+#define ONE_KILOBYTES_IN_BYTES 1024
+#define ONE_MEGABYTE_IN_BYTES (ONE_KILOBYTES_IN_BYTES * 1024)
+#define BENCHMARK_DEFAULT_MEMORY_IN_MEGABYTES 64
+
+/***** enums *****/
+
+/***** structs *****/
+
+/***** externs *****/
+
+/***** prototypes *****/
+int main( int argc, char **argv );
+void internal_show_version( void );
+void callback_stdout( char *string );
+
+void *benchmark_pal_numa_malloc( lfds710_pal_uint_t numa_node_id, lfds710_pal_uint_t size_in_bytes );
+void benchmark_pal_numa_free( void *memory, lfds710_pal_uint_t size_in_bytes );
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+/***** structs *****/
+struct operations_to_perform
+{
+ char
+ *gnuplot_system_string;
+
+ enum flag
+ benchmark_duration_flag,
+ gnuplot_file_flag,
+ gnuplot_logarithmic_yaxis_flag,
+ gnuplot_png_flag,
+ gnuplot_height_flag,
+ gnuplot_width_flag,
+ run_flag,
+ show_cpu_topology_only_flag,
+ show_error_flag,
+ show_help_flag,
+ show_version_flag,
+ memory_flag;
+
+ lfds710_pal_uint_t
+ memory_in_megabytes,
+ benchmark_duration_in_seconds,
+ gnuplot_height_in_pixels,
+ gnuplot_width_in_pixels;
+};
+
+/***** prototypes *****/
+static void convert_command_line_args_to_operations_to_perform( int argc, char **argv, struct operations_to_perform *otp );
+static void perform_operations( struct operations_to_perform *otp );
+static void memory_cleanup_callback( enum flag known_numa_node_flag, void *store, lfds710_pal_uint_t size_in_bytes );
+
+
+
+
+
+/****************************************************************************/
+int main( int argc, char **argv )
+{
+ struct operations_to_perform
+ otp;
+
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ convert_command_line_args_to_operations_to_perform( argc, argv, &otp );
+
+ perform_operations( &otp );
+
+ return EXIT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+static void convert_command_line_args_to_operations_to_perform( int argc, char **argv, struct operations_to_perform *otp )
+{
+ int
+ rv;
+
+ struct util_cmdline_state
+ cs;
+
+ union util_cmdline_arg_data
+ *arg_data;
+
+ assert( argc >= 1 );
+ assert( argv != NULL );
+ assert( otp != NULL );
+
+ otp->benchmark_duration_flag = LOWERED;
+ otp->gnuplot_file_flag = LOWERED;
+ otp->gnuplot_logarithmic_yaxis_flag = LOWERED;
+ otp->gnuplot_png_flag = LOWERED;
+ otp->gnuplot_height_flag = LOWERED;
+ otp->gnuplot_width_flag = LOWERED;
+ otp->run_flag = LOWERED;
+ otp->show_cpu_topology_only_flag = LOWERED;
+ otp->show_error_flag = LOWERED;
+ otp->show_help_flag = RAISED;
+ otp->show_version_flag = LOWERED;
+ otp->memory_flag = LOWERED;
+
+ /* TRD : the numeric options are used by libbenchmark
+ if we pass in a bitmask indicating they are set
+ however, we ourselves use otp->memory_in_megabytes
+ when we alloc for printing topology, so we need
+ to initialize it
+ */
+
+ otp->memory_in_megabytes = BENCHMARK_DEFAULT_MEMORY_IN_MEGABYTES;
+
+ util_cmdline_init( &cs );
+
+ util_cmdline_add_arg( &cs, 'g', UTIL_CMDLINE_ARG_TYPE_STRING );
+ util_cmdline_add_arg( &cs, 'h', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'l', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'm', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 'p', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'r', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 's', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 't', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'v', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'x', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 'y', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+
+ rv = util_cmdline_process_args( &cs, argc, argv );
+
+ if( rv == 0 )
+ otp->show_error_flag = RAISED;
+
+ if( rv == 1 )
+ {
+ util_cmdline_get_arg_data( &cs, 'g', &arg_data );
+ if( arg_data != NULL )
+ {
+ otp->gnuplot_file_flag = RAISED;
+ otp->gnuplot_system_string = arg_data->string.string;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'h', &arg_data );
+ if( arg_data != NULL )
+ otp->show_help_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'l', &arg_data );
+ if( arg_data != NULL )
+ otp->gnuplot_logarithmic_yaxis_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'm', &arg_data );
+ if( arg_data != NULL )
+ {
+ if( arg_data->integer.integer < 2 )
+ {
+ puts( "Memory (in megabytes) needs to be 2 or greater." );
+ exit( EXIT_FAILURE );
+ }
+
+ otp->memory_in_megabytes = (lfds710_pal_uint_t) arg_data->integer.integer;
+ otp->memory_flag = RAISED;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'p', &arg_data );
+ if( arg_data != NULL )
+ {
+ otp->gnuplot_png_flag = RAISED;
+
+ // TRD : if -p, -g must be present
+ util_cmdline_get_arg_data( &cs, 'g', &arg_data );
+ if( arg_data == NULL )
+ {
+ puts( "If -p is given, -g must also be given." );
+ exit( EXIT_FAILURE );
+ }
+ }
+
+ util_cmdline_get_arg_data( &cs, 'r', &arg_data );
+ if( arg_data != NULL )
+ {
+ otp->show_help_flag = LOWERED;
+ otp->run_flag = RAISED;
+ }
+
+ util_cmdline_get_arg_data( &cs, 's', &arg_data );
+ if( arg_data != NULL )
+ {
+ if( arg_data->integer.integer < 1 )
+ {
+ puts( "Duration in seconds needs to be 1 or greater." );
+ exit( EXIT_FAILURE );
+ }
+
+ otp->benchmark_duration_in_seconds = (lfds710_pal_uint_t) arg_data->integer.integer;
+ otp->benchmark_duration_flag = RAISED;
+ }
+
+ util_cmdline_get_arg_data( &cs, 't', &arg_data );
+ if( arg_data != NULL )
+ {
+ otp->show_help_flag = LOWERED;
+ otp->show_cpu_topology_only_flag = RAISED;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'v', &arg_data );
+ if( arg_data != NULL )
+ {
+ otp->show_help_flag = LOWERED;
+ otp->show_version_flag = RAISED;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'x', &arg_data );
+ if( arg_data != NULL )
+ {
+ if( arg_data->integer.integer < 1 )
+ {
+ puts( "Gnuplot width in pixels needs to be 1 or greater." );
+ exit( EXIT_FAILURE );
+ }
+
+ otp->gnuplot_width_in_pixels = (lfds710_pal_uint_t) arg_data->integer.integer;
+ otp->gnuplot_width_flag = RAISED;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'y', &arg_data );
+ if( arg_data != NULL )
+ {
+ if( arg_data->integer.integer < 1 )
+ {
+ puts( "Gnuplot height in pixels needs to be 1 or greater." );
+ exit( EXIT_FAILURE );
+ }
+
+ otp->gnuplot_height_in_pixels = (lfds710_pal_uint_t) arg_data->integer.integer;
+ otp->gnuplot_height_flag = RAISED;
+ }
+ }
+
+ util_cmdline_cleanup( &cs );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void perform_operations( struct operations_to_perform *otp )
+{
+ assert( otp != NULL );
+
+ if( otp->show_error_flag == RAISED )
+ {
+ printf( "\nInvalid arguments. Sorry - it's a simple parser, so no clues.\n"
+ "-h or run with no args to see the help text.\n" );
+
+ return;
+ }
+
+ if( otp->show_help_flag == RAISED )
+ {
+ printf( "benchmark -g [s] -h -l -m [n] -p -r -s [n] -t -v -x [n] -y [n]\n"
+ " -g [s] : emit gnuplots, where [s] is an arbitrary string (in quotes if spaces) describing the system\n"
+ " -h : help (this text you're reading now)\n"
+ " -l : logarithmic gnuplot y-axis (normally linear)\n"
+ " -m [n] : alloc [n] mb RAM for benchmarks, default is %u (minimum 2 (two))\n"
+ " (user specifies RAM as libbenchmark performs no allocs - rather it is handed a block of memory\n"
+ " on NUMA systems, each node allocates an equal fraction of the total - benchmark knows about\n"
+ " NUMA and does the right things, including NUMA and non-NUMA versions of the benchmarks)\n"
+ " -p : call gnuplot to emit PNGs (requires -g and gnuplot must be on the path)\n"
+ " -r : run (causes benchmarks to run; present so no args gives help)\n"
+ " -s [n] : individual benchmark duration in integer seconds (min 1, duh)\n"
+ " -t : show CPU topology, uses -m (or its default) for amount of RAM to alloc\n"
+ " -v : build and version info\n"
+ " -x [n] : gnuplot width in pixels (in case the computed values are no good)\n"
+ " -y [n] : gnuplot height in pixels (in case the computed values are no good)\n",
+ (int unsigned) BENCHMARK_DEFAULT_MEMORY_IN_MEGABYTES );
+
+ #if( BENCHMARK_PAL_MEMORY_TYPE == BENCHMARK_MEMORY_TYPE_SMP )
+ {
+ printf( "\n"
+ "WARNING : This is the SMP build of benchmark. Do not use it on a NUMA\n"
+ " system as the results will be wrong - and not a bit wrong, but\n"
+ " VERY VERY WRONG.\n"
+ "\n"
+ " The benchmarks measure the performance of liblfds, but to do so,\n"
+ " themselves have work to do, which involves what can be many\n"
+ " memory accesses, and so threads running on the primary NUMA node\n"
+ " have no penalty for say twenty memory accesses, whereas threads\n"
+ " off the primary NUMA node are paying the penalty for all of them,\n"
+ " when maybe only five of those accesses are actually by liblfds.\n"
+ "\n"
+ " As a result, SMP builds on NUMA systems make off-primary-node\n"
+ " threads look MUCH worse than they actually are, because you think\n"
+ " they're only measuring liblfds, when in fact they're not.\n" );
+ }
+ #endif
+
+ return;
+ }
+
+ if( otp->show_cpu_topology_only_flag == RAISED )
+ {
+ char
+ *topology_string;
+
+ struct libshared_memory_state
+ ms;
+
+ struct libbenchmark_topology_state
+ ts;
+
+ void
+ *store;
+
+ libshared_memory_init( &ms );
+ store = malloc( otp->memory_in_megabytes * ONE_MEGABYTE_IN_BYTES );
+ libshared_memory_add_memory( &ms, store, otp->memory_in_megabytes * ONE_MEGABYTE_IN_BYTES );
+ libbenchmark_topology_init( &ts, &ms );
+ topology_string = libbenchmark_topology_generate_string( &ts, &ms, LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_STDOUT );
+ printf( "%s", topology_string );
+ libbenchmark_topology_cleanup( &ts );
+ libshared_memory_cleanup( &ms, NULL );
+ free( store );
+
+ return;
+ }
+
+ if( otp->run_flag == RAISED )
+ {
+ char
+ diskbuffer[BUFSIZ];
+
+ enum libbenchmark_topology_numa_mode
+ numa_mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP;
+
+ FILE
+ *diskfile;
+
+ lfds710_pal_uint_t
+ options_bitmask = NO_FLAGS,
+ size_in_bytes;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_list_asu_state
+ list_of_gnuplots;
+
+ struct libbenchmark_benchmarkset_gnuplot
+ *bg;
+
+ struct libbenchmark_benchmarksuite_state
+ bss;
+
+ struct libshared_memory_state
+ ms_for_benchmarks,
+ ms_for_rs_and_ts;
+
+ struct libbenchmark_topology_state
+ ts;
+
+ struct libbenchmark_results_state
+ rs;
+
+ void
+ *store;
+
+ // TRD : for the liblfds700 benchmarks
+ lfds700_misc_library_init_valid_on_current_logical_core();
+
+ libshared_memory_init( &ms_for_rs_and_ts );
+ size_in_bytes = (otp->memory_in_megabytes / 2) * ONE_MEGABYTE_IN_BYTES;
+ store = malloc( size_in_bytes );
+ libshared_memory_add_memory( &ms_for_rs_and_ts, store, size_in_bytes );
+ libbenchmark_topology_init( &ts, &ms_for_rs_and_ts );
+
+ libshared_memory_init( &ms_for_benchmarks );
+
+ #if( BENCHMARK_PAL_MEMORY_TYPE == BENCHMARK_MEMORY_TYPE_SMP )
+ numa_mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP;
+ size_in_bytes = (otp->memory_in_megabytes / 2) * ONE_MEGABYTE_IN_BYTES;
+ store = malloc( size_in_bytes );
+ libshared_memory_add_memory( &ms_for_benchmarks, store, size_in_bytes );
+ #endif
+
+ #if( BENCHMARK_PAL_MEMORY_TYPE == BENCHMARK_MEMORY_TYPE_NUMA )
+ {
+ lfds710_pal_uint_t
+ numa_node_id,
+ number_numa_nodes;
+
+ struct libbenchmark_topology_iterate_state
+ tis;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ numa_mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA;
+
+ libbenchmark_topology_query( &ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA, (void *) &number_numa_nodes );
+
+ libbenchmark_topology_iterate_init( &tis, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA );
+
+ while( libbenchmark_topology_iterate(&ts, &tis, &tns) )
+ {
+ numa_node_id = LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID( *tns );
+ size_in_bytes = ( (otp->memory_in_megabytes / 2) * ONE_MEGABYTE_IN_BYTES ) / number_numa_nodes;
+ store = benchmark_pal_numa_malloc( (int) numa_node_id, size_in_bytes );
+ libshared_memory_add_memory_from_numa_node( &ms_for_benchmarks, numa_node_id, store, size_in_bytes );
+ }
+ }
+ #endif
+
+ if( otp->benchmark_duration_flag == RAISED )
+ options_bitmask |= LIBBENCHMARK_BENCHMARKSUITE_OPTION_DURATION;
+
+ libbenchmark_benchmarksuite_init( &bss, &ts, &ms_for_benchmarks, numa_mode, options_bitmask, otp->benchmark_duration_in_seconds );
+
+ libbenchmark_results_init( &rs, &ms_for_rs_and_ts );
+
+ libbenchmark_benchmarksuite_run( &bss, &rs );
+
+ if( otp->gnuplot_file_flag == RAISED or otp->gnuplot_png_flag == RAISED )
+ {
+ char
+ system_command[1024];
+
+ struct libbenchmark_gnuplot_options
+ gpo;
+
+ LIBBENCHMARK_GNUPLOT_OPTIONS_INIT( gpo );
+
+ if( otp->gnuplot_logarithmic_yaxis_flag == RAISED )
+ LIBBENCHMARK_GNUPLOT_OPTIONS_SET_Y_AXIS_SCALE_TYPE_LOGARITHMIC( gpo );
+
+ if( otp->gnuplot_height_flag == RAISED )
+ LIBBENCHMARK_GNUPLOT_OPTIONS_SET_HEIGHT_IN_PIXELS( gpo, otp->gnuplot_height_in_pixels );
+
+ if( otp->gnuplot_width_flag == RAISED )
+ LIBBENCHMARK_GNUPLOT_OPTIONS_SET_WIDTH_IN_PIXELS( gpo, otp->gnuplot_width_in_pixels );
+
+ libbenchmark_benchmarksuite_get_list_of_gnuplot_strings( &bss, &rs, otp->gnuplot_system_string, &gpo, &list_of_gnuplots );
+
+ // TRD : write the gnuplot strings to disk
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(list_of_gnuplots,lasue) )
+ {
+ bg = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ diskfile = fopen( bg->filename, "w" );
+ setbuf( diskfile, diskbuffer );
+ // TRD : standard only requires fprintf() to support up to 509 characters of output
+ fwrite( bg->gnuplot_string, libshared_ansi_strlen(bg->gnuplot_string), 1, diskfile );
+ fclose( diskfile );
+
+ if( otp->gnuplot_png_flag == RAISED )
+ {
+ sprintf( system_command, "gnuplot \"%s\"", bg->filename );
+ system( system_command );
+ }
+ }
+ }
+
+ libbenchmark_results_cleanup( &rs );
+
+ libbenchmark_benchmarksuite_cleanup( &bss );
+
+ libshared_memory_cleanup( &ms_for_benchmarks, memory_cleanup_callback );
+
+ libshared_memory_cleanup( &ms_for_rs_and_ts, memory_cleanup_callback );
+
+ lfds700_misc_library_cleanup();
+ }
+
+ if( otp->show_version_flag == RAISED )
+ internal_show_version();
+
+ return;
+}
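+
+/* TRD : illustrative invocation only - the option letters are those listed
+         in the help text above, but the duration and system description
+         string here are made-up examples;
+
+           benchmark -r -s 5 -g "2 x Xeon E5620, 16GB RAM" -p
+
+         i.e. run each benchmark for five seconds, emit gnuplot files
+         describing the system as given, and call gnuplot to produce PNGs
+*/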
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void memory_cleanup_callback( enum flag known_numa_node_flag, void *store, lfds710_pal_uint_t size_in_bytes )
+{
+ assert( store != NULL );
+ // TRD : size_in_bytes can be any value in its range
+
+ #if( BENCHMARK_PAL_MEMORY_TYPE == BENCHMARK_MEMORY_TYPE_SMP )
+ free( store );
+ #endif
+
+ #if( BENCHMARK_PAL_MEMORY_TYPE == BENCHMARK_MEMORY_TYPE_NUMA )
+ benchmark_pal_numa_free( store, size_in_bytes );
+ #endif
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void internal_show_version( void )
+{
+ char const
+ *version_and_build_string;
+
+ char static const
+ * const local_build_and_version_string = "benchmark " BENCHMARK_VERSION_STRING " (" BUILD_TYPE_STRING ", " MODE_TYPE_STRING ", " BENCHMARK_PAL_MEMORY_TYPE_STRING ")";
+
+ printf( "%s\n", local_build_and_version_string );
+
+ libbenchmark_misc_query( LIBBENCHMARK_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ libshared_misc_query( LIBSHARED_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ lfds710_misc_query( LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ lfds700_misc_query( LFDS700_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ return;
+}
+
--- /dev/null
+/***** defines *****/
+#define BENCHMARK_MEMORY_TYPE_SMP 1
+#define BENCHMARK_MEMORY_TYPE_NUMA 2
+
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP )
+
+ #pragma warning( disable : 4100 )
+
+ void benchmark_pal_numa_free( void *memory, lfds710_pal_uint_t size_in_bytes )
+ {
+ HANDLE
+ process_handle;
+
+ assert( memory != NULL );
+ // TRD : size_in_bytes can be any value in its range
+
+ process_handle = GetCurrentProcess();
+
+ VirtualFreeEx( process_handle, memory, 0, MEM_RELEASE );
+
+ return;
+ }
+
+ #pragma warning( default : 4100 )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined LIBNUMA )
+
+ #ifdef BENCHMARK_PAL_NUMA_FREE
+ #error More than one porting abstraction layer matches the current platform in porting_abstraction_free.c
+ #endif
+
+ #define BENCHMARK_PAL_NUMA_FREE
+
+ void benchmark_pal_numa_free( void *memory, lfds710_pal_uint_t size_in_bytes )
+ {
+ assert( memory != NULL );
+ // TRD : size_in_bytes can be any value in its range
+
+ #if( defined _POSIX_MEMLOCK_RANGE && _POSIX_MEMLOCK_RANGE > 0 )
+ munlock( memory, size_in_bytes );
+ #endif
+
+ numa_free( memory, size_in_bytes );
+
+ return;
+ }
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_VISTA )
+
+ #ifdef BENCHMARK_PAL_NUMA_MALLOC
+ #error More than one porting abstraction layer matches the current platform in porting_abstraction_malloc.c
+ #endif
+
+ #define BENCHMARK_PAL_NUMA_MALLOC
+
+ void *benchmark_pal_numa_malloc( lfds710_pal_uint_t numa_node_id, lfds710_pal_uint_t size_in_bytes )
+ {
+ HANDLE
+ process_handle;
+
+ LPVOID
+ memory;
+
+ // TRD : numa_node_id can be any value in its range
+ // TRD : size_in_bytes can be any value in its range
+
+ process_handle = GetCurrentProcess();
+
+ memory = VirtualAllocExNuma( process_handle, NULL, size_in_bytes, MEM_COMMIT, PAGE_READWRITE, (DWORD) numa_node_id );
+
+ return memory;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined LIBNUMA )
+
+ #ifdef BENCHMARK_PAL_NUMA_MALLOC
+ #error More than one porting abstraction layer matches the current platform in porting_abstraction_malloc.c
+ #endif
+
+ #define BENCHMARK_PAL_NUMA_MALLOC
+
+ void *benchmark_pal_numa_malloc( lfds710_pal_uint_t numa_node_id, lfds710_pal_uint_t size_in_bytes )
+ {
+ void
+ *memory;
+
+ // TRD : numa_node_id can be any value in its range
+ // TRD : size_in_bytes can be any value in its range
+
+ memory = numa_alloc_onnode( size_in_bytes, (int) numa_node_id );
+
+ /* TRD : mlock prevents paging
+ this is unfortunately necessary on Linux
+ due to serious shortcomings in the way NUMA is handled
+
+ in particular that the NUMA node is re-chosen if a memory page is paged out and then paged back in
+ but also because Linux doesn't page in a single page at a time, but a line of pages
+ so another process can end up moving *your* pages into *its* NUMA node (e.g. your pages are
+ in the line of pages), because the NUMA policy for *its* pages would put them in that node!
+
+ it seems to me this is one of the very rare occasions
+ where Windows has something right and Linux has it wrong
+ (Windows has the notion of an ideal NUMA node for a thread, and continually works
+ to move any pages which leave that node back into that node, and on page-in will
+ try first to re-use that node)
+
+ since we use small amounts of memory, I address the whole sorry mess
+ simply by locking the pages into memory - this way they will stay in the NUMA node
+ they were allocated into (assuming they've not been paged out and then back in,
+ between the numa_alloc_onnode() call and the mlock() call)
+ */
+
+ #if( defined _POSIX_MEMLOCK_RANGE && _POSIX_MEMLOCK_RANGE > 0 )
+ mlock( memory, size_in_bytes );
+ #endif
+
+ return memory;
+ }
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _MSC_VER )
+ /* TRD : MSVC compiler
+
+ an unfortunately necessary hack for MSVC
+ MSVC only defines __STDC__ if /Za is given, where /Za turns off MSVC C extensions -
+ which prevents Windows header files from compiling.
+ */
+
+ #define __STDC__ 1
+ #define __STDC_HOSTED__ 1
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION < NTDDI_VISTA )
+
+ #ifdef BENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform.
+ #endif
+
+ #define BENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define BENCHMARK_PAL_OS_STRING "Windows"
+ #define BENCHMARK_PAL_MEMORY_TYPE BENCHMARK_MEMORY_TYPE_SMP
+ #define BENCHMARK_PAL_MEMORY_TYPE_STRING "SMP"
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_VISTA )
+
+ #ifdef BENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform.
+ #endif
+
+ #define BENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define BENCHMARK_PAL_OS_STRING "Windows"
+ #define BENCHMARK_PAL_MEMORY_TYPE BENCHMARK_MEMORY_TYPE_NUMA
+ #define BENCHMARK_PAL_MEMORY_TYPE_STRING "NUMA"
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE && !defined LIBNUMA )
+
+ #ifdef BENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform.
+ #endif
+
+ #define BENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define BENCHMARK_PAL_OS_STRING "Linux"
+ #define BENCHMARK_PAL_MEMORY_TYPE BENCHMARK_MEMORY_TYPE_SMP
+ #define BENCHMARK_PAL_MEMORY_TYPE_STRING "SMP"
+
+ #include <unistd.h>
+ #if( _POSIX_MEMLOCK_RANGE > 0 )
+ #include <sys/mman.h>
+ #endif
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE && defined LIBNUMA )
+
+ #ifdef BENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform.
+ #endif
+
+ #define BENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define BENCHMARK_PAL_OS_STRING "Linux"
+ #define BENCHMARK_PAL_MEMORY_TYPE BENCHMARK_MEMORY_TYPE_NUMA
+ #define BENCHMARK_PAL_MEMORY_TYPE_STRING "NUMA"
+
+ #include <unistd.h>
+ #include <numa.h>
+ #if( _POSIX_MEMLOCK_RANGE > 0 )
+ #include <sys/mman.h>
+ #endif
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined BENCHMARK_PAL_OPERATING_SYSTEM )
+
+ #error No operating system porting abstraction layer.
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_init( struct util_cmdline_state *cs )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ assert( cs != NULL );
+
+ for( loop = 0 ; loop < NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET ; loop++ )
+ {
+ cs->args[loop].arg_type = UTIL_CMDLINE_ARG_TYPE_UNSET;
+ cs->args[loop].processed_flag = LOWERED;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void util_cmdline_cleanup( struct util_cmdline_state *cs )
+{
+ assert( cs != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_add_arg( struct util_cmdline_state *cs, char arg_letter, enum util_cmdline_arg_type arg_type )
+{
+ lfds710_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( arg_letter >= 'a' and arg_letter <= 'z' );
+ // TRD : arg_type can be any value in its range
+
+ index = arg_letter - 'a';
+
+ cs->args[index].arg_type = arg_type;
+
+ if( arg_type == UTIL_CMDLINE_ARG_TYPE_FLAG )
+ cs->args[index].arg_data.flag.flag = LOWERED;
+
+ if( arg_type == UTIL_CMDLINE_ARG_TYPE_STRING )
+ cs->args[index].arg_data.string.string = NULL;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int util_cmdline_process_args( struct util_cmdline_state *cs, int argc, char **argv )
+{
+ char
+ *arg;
+
+ int
+ arg_letter,
+ cc,
+ loop,
+ rv = 1;
+
+ lfds710_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ for( loop = 1 ; loop < argc ; loop++ )
+ {
+ arg = *(argv+loop);
+
+ switch( *arg )
+ {
+ case '-':
+ arg_letter = tolower( *(arg+1) );
+
+ if( arg_letter >= 'a' and arg_letter <= 'z' )
+ {
+ index = arg_letter - 'a';
+
+ switch( cs->args[index].arg_type )
+ {
+ case UTIL_CMDLINE_ARG_TYPE_INTEGER_RANGE:
+ if( loop+1 >= argc )
+ rv = 0;
+
+ if( loop+1 < argc )
+ {
+ cc = sscanf( *(argv+loop+1), "%llu-%llu", &cs->args[index].arg_data.integer_range.integer_start, &cs->args[index].arg_data.integer_range.integer_end );
+
+ if( cc != 2 )
+ rv = 0;
+
+ if( cc == 2 )
+ {
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ }
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_INTEGER:
+ if( loop+1 >= argc )
+ rv = 0;
+
+ if( loop+1 < argc )
+ {
+ cc = sscanf( *(argv+loop+1), "%llu", &cs->args[index].arg_data.integer.integer );
+
+ if( cc != 1 )
+ rv = 0;
+
+ if( cc == 1 )
+ {
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ }
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_FLAG:
+ cs->args[index].arg_data.flag.flag = RAISED;
+ cs->args[index].processed_flag = RAISED;
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_STRING:
+ if( loop+1 >= argc )
+ rv = 0;
+ if( loop+1 < argc )
+ {
+ cs->args[index].arg_data.string.string = *(argv+loop+1);
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_UNSET:
+ break;
+ }
+ }
+ break;
+
+ default:
+ rv = 0;
+ break;
+ }
+ }
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_get_arg_data( struct util_cmdline_state *cs, char arg_letter, union util_cmdline_arg_data **arg_data )
+{
+ lfds710_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( arg_letter >= 'a' and arg_letter <= 'z' );
+ assert( arg_data != NULL );
+
+ index = arg_letter - 'a';
+
+ if( cs->args[index].processed_flag == RAISED )
+ *arg_data = &cs->args[index].arg_data;
+ else
+ *arg_data = NULL;
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include <ctype.h>
+
+/***** defines *****/
+#define NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET 26
+
+/***** enums *****/
+enum util_cmdline_arg_type
+{
+ UTIL_CMDLINE_ARG_TYPE_INTEGER_RANGE,
+ UTIL_CMDLINE_ARG_TYPE_INTEGER,
+ UTIL_CMDLINE_ARG_TYPE_FLAG,
+ UTIL_CMDLINE_ARG_TYPE_STRING,
+ UTIL_CMDLINE_ARG_TYPE_UNSET
+};
+
+/***** structs *****/
+struct util_cmdline_arg_integer_range
+{
+ int long long unsigned
+ integer_start,
+ integer_end;
+};
+
+struct util_cmdline_arg_integer
+{
+ int long long unsigned
+ integer;
+};
+
+struct util_cmdline_arg_flag
+{
+ enum flag
+ flag;
+};
+
+struct util_cmdline_arg_string
+{
+ char
+ *string;
+};
+
+union util_cmdline_arg_data
+{
+ struct util_cmdline_arg_integer_range
+ integer_range;
+
+ struct util_cmdline_arg_integer
+ integer;
+
+ struct util_cmdline_arg_flag
+ flag;
+
+ struct util_cmdline_arg_string
+ string;
+};
+
+struct util_cmdline_arg_letter_and_data
+{
+ enum util_cmdline_arg_type
+ arg_type;
+
+ enum flag
+ processed_flag;
+
+ union util_cmdline_arg_data
+ arg_data;
+};
+
+struct util_cmdline_state
+{
+ struct util_cmdline_arg_letter_and_data
+ args[NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET];
+};
+
+/***** public prototypes *****/
+void util_cmdline_init( struct util_cmdline_state *cs );
+void util_cmdline_cleanup( struct util_cmdline_state *cs );
+void util_cmdline_add_arg( struct util_cmdline_state *cs, char arg_letter, enum util_cmdline_arg_type arg_type );
+int util_cmdline_process_args( struct util_cmdline_state *cs, int argc, char **argv );
+void util_cmdline_get_arg_data( struct util_cmdline_state *cs, char arg_letter, union util_cmdline_arg_data **arg_data );
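+
+/* TRD : illustrative usage sketch only - the argument letters and types here
+         are examples, not a statement of what benchmark itself registers;
+
+           struct util_cmdline_state
+             cs;
+
+           union util_cmdline_arg_data
+             *arg_data;
+
+           int long long unsigned
+             duration = 0;
+
+           util_cmdline_init( &cs );
+           util_cmdline_add_arg( &cs, 'r', UTIL_CMDLINE_ARG_TYPE_FLAG );
+           util_cmdline_add_arg( &cs, 's', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+
+           if( util_cmdline_process_args(&cs, argc, argv) )
+           {
+             util_cmdline_get_arg_data( &cs, 's', &arg_data );
+             if( arg_data != NULL )
+               duration = arg_data->integer.integer;
+           }
+
+           util_cmdline_cleanup( &cs );
+*/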
+
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+INCDIR := ../../inc
+OBJDIR := ../../obj
+SRCDIR := ../../src
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+VERSION_NUMBER := 1
+MINOR_NUMBER := 0
+RELEASE_NUMBER := 0
+
+##### sources, objects and libraries #####
+BINNAME := libbenchmark
+ARFILENAME := $(BINNAME).a
+ARPATHNAME := $(BINDIR)/$(ARFILENAME)
+SOBASENAME := $(BINNAME).so
+SONAME := $(SOBASENAME).$(VERSION_NUMBER)
+SOFILENAME := $(SONAME).$(MINOR_NUMBER).$(RELEASE_NUMBER)
+SOPATHNAME := $(BINDIR)/$(SOFILENAME)
+INCNAME := $(INCDIR)/$(BINNAME).h
+SRCDIRS := libbenchmark_benchmarkinstance libbenchmark_benchmarks_btree_au_readn_writen libbenchmark_benchmarks_freelist_push1_then_pop1 libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1 libbenchmark_benchmarkset libbenchmark_benchmarksuite libbenchmark_datastructures_btree_au libbenchmark_datastructures_freelist libbenchmark_datastructures_queue_umm libbenchmark_misc libbenchmark_porting_abstraction_layer libbenchmark_results libbenchmark_threadset libbenchmark_topology libbenchmark_topology_node
+SOURCES := libbenchmark_benchmarkinstance_cleanup.c libbenchmark_benchmarkinstance_init.c libbenchmark_benchmarkinstance_run.c \
+ libbenchmark_benchmarks_btree_au_gcc_spinlock_atomic_readn_writen.c libbenchmark_benchmarks_btree_au_gcc_spinlock_sync_readn_writen.c libbenchmark_benchmarks_btree_au_liblfds700_lockfree_readn_writen.c libbenchmark_benchmarks_btree_au_liblfds710_lockfree_readn_writen.c libbenchmark_benchmarks_btree_au_msvc_spinlock_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_mutex_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_rwlock_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_spinlock_process_private_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_spinlock_process_shared_readn_writen.c libbenchmark_benchmarks_btree_au_windows_critical_section_readn_writen.c libbenchmark_benchmarks_btree_au_windows_mutex_readn_writen.c \
+ libbenchmark_benchmarks_freelist_gcc_spinlock_atomic_push1_then_pop1.c libbenchmark_benchmarks_freelist_gcc_spinlock_sync_push1_then_pop1.c libbenchmark_benchmarks_freelist_liblfds700_lockfree_push1_then_pop1.c libbenchmark_benchmarks_freelist_liblfds710_lockfree_push1_then_pop1.c libbenchmark_benchmarks_freelist_msvc_spinlock_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_mutex_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_rwlock_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_spinlock_process_private_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_spinlock_process_shared_push1_then_pop1.c libbenchmark_benchmarks_freelist_windows_critical_section_push1_then_pop1.c libbenchmark_benchmarks_freelist_windows_mutex_push1_then_pop1.c \
+ libbenchmark_benchmarks_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_liblfds700_lockfree_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_liblfds710_lockfree_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_msvc_spinlock_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_mutex_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_rwlock_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_windows_critical_section_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_windows_mutex_enqueue1_dequeue1.c \
+ libbenchmark_benchmarkset_add.c libbenchmark_benchmarkset_cleanup.c libbenchmark_benchmarkset_gnuplot.c libbenchmark_benchmarkset_init.c libbenchmark_benchmarkset_run.c \
+ libbenchmark_benchmarksuite_add.c libbenchmark_benchmarksuite_cleanup.c libbenchmark_benchmarksuite_gnuplot.c libbenchmark_benchmarksuite_init.c libbenchmark_benchmarksuite_run.c \
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic.c libbenchmark_datastructure_btree_au_gcc_spinlock_sync.c libbenchmark_datastructure_btree_au_msvc_spinlock.c libbenchmark_datastructure_btree_au_pthread_mutex.c libbenchmark_datastructure_btree_au_pthread_rwlock.c libbenchmark_datastructure_btree_au_pthread_spinlock_process_private.c libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared.c libbenchmark_datastructure_btree_au_windows_critical_section.c libbenchmark_datastructure_btree_au_windows_mutex.c \
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic.c libbenchmark_datastructure_freelist_gcc_spinlock_sync.c libbenchmark_datastructure_freelist_msvc_spinlock.c libbenchmark_datastructure_freelist_pthread_mutex.c libbenchmark_datastructure_freelist_pthread_rwlock.c libbenchmark_datastructure_freelist_pthread_spinlock_process_private.c libbenchmark_datastructure_freelist_pthread_spinlock_process_shared.c libbenchmark_datastructure_freelist_windows_critical_section.c libbenchmark_datastructure_freelist_windows_mutex.c \
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic.c libbenchmark_datastructure_queue_umm_gcc_spinlock_sync.c libbenchmark_datastructure_queue_umm_msvc_spinlock.c libbenchmark_datastructure_queue_umm_pthread_mutex.c libbenchmark_datastructure_queue_umm_pthread_rwlock.c libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private.c libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared.c libbenchmark_datastructure_queue_umm_windows_critical_section.c libbenchmark_datastructure_queue_umm_windows_mutex.c \
+ libbenchmark_misc_globals.c libbenchmark_misc_pal_helpers.c libbenchmark_misc_query.c \
+ libbenchmark_porting_abstraction_layer_populate_topology.c libbenchmark_porting_abstraction_layer_print_string.c \
+ libbenchmark_results_cleanup.c libbenchmark_results_compare.c libbenchmark_results_get_result.c libbenchmark_results_init.c libbenchmark_results_put_result.c \
+ libbenchmark_threadset_cleanup.c libbenchmark_threadset_init.c libbenchmark_threadset_operations.c \
+ libbenchmark_topology_cleanup.c libbenchmark_topology_compare.c libbenchmark_topology_init.c libbenchmark_topology_insert.c libbenchmark_topology_iterate.c libbenchmark_topology_lpsets.c libbenchmark_topology_numa.c libbenchmark_topology_query.c libbenchmark_topology_string.c \
+ libbenchmark_topology_node_cleanup.c libbenchmark_topology_node_compare.c libbenchmark_topology_node_init.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+DEPENDS := $(patsubst %.o,%.d,$(OBJECTS))
+SYSLIBS :=
+
+##### default paths fix up #####
+CPATH := $(subst : ,:,$(SRCDIR):$(INCDIR))
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL := -std=gnu89
+
+CC := gcc
+CFLAGS_MANDATORY := -c -fno-strict-aliasing
+CFLAGS_OPTIONAL := -std=gnu89 -Wall -Werror -Wno-unknown-pragmas
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+AR := ar
+ARFLAGS :=
+ARFLAGS_MANDATORY := rcs
+ARFLAGS_OPTIONAL :=
+
+LD := gcc
+LDFLAGS_MANDATORY := -shared -Wl,-soname,$(SONAME) -o $(SOPATHNAME)
+LDFLAGS_OPTIONAL := -std=gnu89 -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -fPIC
+
+##### build variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CFLAGS_MANDATORY += -fPIC
+endif
+
+# TRD : default to debug
+ifeq ($(MAKECMDGOALS),)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(ARPATHNAME) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(ARPATHNAME) $(OBJECTS)
+
+$(SOPATHNAME) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) -o $(SOPATHNAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SONAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SOBASENAME)
+
+##### phony #####
+.PHONY : clean ar_cov ar_dbg ar_install ar_prof ar_rel ar_tsan ar_uninstall ar_vanilla so_cov so_dbg so_install so_prof so_rel so_tsan so_uninstall so_vanilla
+
+clean :
+ @rm -f $(BINDIR)/* $(OBJDIR)/*
+
+ar_cov : $(ARPATHNAME) # archive (.a), coverage
+ar_dbg : $(ARPATHNAME) # archive (.a), debug
+ar_prof : $(ARPATHNAME) # archive (.a), profiling
+ar_rel : $(ARPATHNAME) # archive (.a), release
+ar_tsan : $(ARPATHNAME) # archive (.a), thread sanitizer
+ar_vanilla : $(ARPATHNAME) # archive (.a), no specific-build arguments
+ar_install :
+ # TRD : leading backslash to use command rather than alias
+ # as many Linux distros have a built-in alias to force
+ # a prompt ("y/n?") on file overwrite - silent and
+ # unexpected interference which breaks a makefile
+ @mkdir -p $(INSLIBDIR)
+ @\cp $(ARPATHNAME) $(INSLIBDIR)
+ @mkdir -p $(INSINCDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+ar_uninstall :
+ @rm $(INSLIBDIR)/$(ARFILENAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+so_cov : $(SOPATHNAME) # shared (.so), coverage
+so_dbg : $(SOPATHNAME) # shared (.so), debug
+so_prof : $(SOPATHNAME) # shared (.so), profiling
+so_rel : $(SOPATHNAME) # shared (.so), release
+so_tsan : $(SOPATHNAME) # shared (.so), thread sanitizer
+so_vanilla : $(SOPATHNAME) # shared (.so), no specific-build arguments
+so_install :
+ @mkdir -p $(INSLIBDIR)
+ @\cp $(SOPATHNAME) $(INSLIBDIR)
+ @ldconfig -vn $(INSLIBDIR)
+ @ln -s $(SONAME) $(INSLIBDIR)/$(SOBASENAME)
+ @mkdir -p $(INSINCDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+so_uninstall :
+ @rm -f $(INSLIBDIR)/$(SOFILENAME)
+ @rm -f $(INSLIBDIR)/$(SOBASENAME)
+ @rm -f $(INSLIBDIR)/$(SONAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+##### dependencies #####
+-include $(DEPENDS)
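+
+##### usage notes #####
+# TRD : examples only - "make ar_rel" builds the release archive, "make so_dbg"
+#       the debug shared library, and so on; INSLIBDIR and INSINCDIR are not
+#       set in this makefile, so for the install/uninstall targets supply them
+#       yourself, e.g. (paths illustrative, not mandated)
+#
+#         make INSLIBDIR=/usr/local/lib INSINCDIR=/usr/local/include ar_install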
+
--- /dev/null
+lib-y :=
+
+lib-y += ../../src/libbenchmark_benchmarkinstance/libbenchmark_benchmarkinstance_cleanup.o
+lib-y += ../../src/libbenchmark_benchmarkinstance/libbenchmark_benchmarkinstance_init.o
+lib-y += ../../src/libbenchmark_benchmarkinstance/libbenchmark_benchmarkinstance_run.o
+
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_gcc_spinlock_atomic_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_gcc_spinlock_sync_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_liblfds700_lockfree_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_liblfds710_lockfree_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_msvc_spinlock_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_pthread_mutex_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_pthread_rwlock_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_pthread_spinlock_process_private_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_pthread_spinlock_process_shared_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_windows_critical_section_readn_writen.o
+lib-y += ../../src/libbenchmark_benchmarks_btree_au_readn_writen/libbenchmark_benchmarks_btree_au_windows_mutex_readn_writen.o
+
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_gcc_spinlock_atomic_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_gcc_spinlock_sync_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_liblfds700_lockfree_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_liblfds710_lockfree_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_msvc_spinlock_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_pthread_mutex_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_pthread_rwlock_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_pthread_spinlock_process_private_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_pthread_spinlock_process_shared_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_windows_critical_section_push1_then_pop1.o
+lib-y += ../../src/libbenchmark_benchmarks_freelist_push1_then_pop1/libbenchmark_benchmarks_freelist_windows_mutex_push1_then_pop1.o
+
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_liblfds700_lockfree_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_liblfds710_lockfree_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_msvc_spinlock_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_pthread_mutex_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_pthread_rwlock_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_windows_critical_section_enqueue1_dequeue1.o
+lib-y += ../../src/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1/libbenchmark_benchmarks_queue_umm_windows_mutex_enqueue1_dequeue1.o
+
+lib-y += ../../src/libbenchmark_benchmarkset/libbenchmark_benchmarkset_add.o
+lib-y += ../../src/libbenchmark_benchmarkset/libbenchmark_benchmarkset_cleanup.o
+lib-y += ../../src/libbenchmark_benchmarkset/libbenchmark_benchmarkset_gnuplot.o
+lib-y += ../../src/libbenchmark_benchmarkset/libbenchmark_benchmarkset_init.o
+lib-y += ../../src/libbenchmark_benchmarkset/libbenchmark_benchmarkset_run.o
+
+lib-y += ../../src/libbenchmark_benchmarksuite/libbenchmark_benchmarksuite_add.o
+lib-y += ../../src/libbenchmark_benchmarksuite/libbenchmark_benchmarksuite_cleanup.o
+lib-y += ../../src/libbenchmark_benchmarksuite/libbenchmark_benchmarksuite_gnuplot.o
+lib-y += ../../src/libbenchmark_benchmarksuite/libbenchmark_benchmarksuite_init.o
+lib-y += ../../src/libbenchmark_benchmarksuite/libbenchmark_benchmarksuite_run.o
+
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_gcc_spinlock_atomic.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_gcc_spinlock_sync.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_msvc_spinlock.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_pthread_mutex.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_pthread_rwlock.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_pthread_spinlock_process_private.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_windows_critical_section.o
+lib-y += ../../src/libbenchmark_datastructures_btree_au/libbenchmark_datastructure_btree_au_windows_mutex.o
+
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_gcc_spinlock_atomic.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_gcc_spinlock_sync.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_msvc_spinlock.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_pthread_mutex.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_pthread_rwlock.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_pthread_spinlock_process_private.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_pthread_spinlock_process_shared.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_windows_critical_section.o
+lib-y += ../../src/libbenchmark_datastructures_freelist/libbenchmark_datastructure_freelist_windows_mutex.o
+
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_gcc_spinlock_sync.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_msvc_spinlock.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_pthread_mutex.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_pthread_rwlock.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_windows_critical_section.o
+lib-y += ../../src/libbenchmark_datastructures_queue_umm/libbenchmark_datastructure_queue_umm_windows_mutex.o
+
+lib-y += ../../src/libbenchmark_misc/libbenchmark_misc_globals.o
+lib-y += ../../src/libbenchmark_misc/libbenchmark_misc_pal_helpers.o
+lib-y += ../../src/libbenchmark_misc/libbenchmark_misc_query.o
+
+lib-y += ../../src/libbenchmark_porting_abstraction_layer/libbenchmark_porting_abstraction_layer_populate_topology.o
+lib-y += ../../src/libbenchmark_porting_abstraction_layer/libbenchmark_porting_abstraction_layer_print_string.o
+
+lib-y += ../../src/libbenchmark_results/libbenchmark_results_cleanup.o
+lib-y += ../../src/libbenchmark_results/libbenchmark_results_compare.o
+lib-y += ../../src/libbenchmark_results/libbenchmark_results_get_result.o
+lib-y += ../../src/libbenchmark_results/libbenchmark_results_init.o
+lib-y += ../../src/libbenchmark_results/libbenchmark_results_put_result.o
+
+lib-y += ../../src/libbenchmark_threadset/libbenchmark_threadset_cleanup.o
+lib-y += ../../src/libbenchmark_threadset/libbenchmark_threadset_init.o
+lib-y += ../../src/libbenchmark_threadset/libbenchmark_threadset_operations.o
+
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_cleanup.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_compare.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_init.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_insert.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_iterate.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_lpsets.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_numa.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_query.o
+lib-y += ../../src/libbenchmark_topology/libbenchmark_topology_string.o
+
+lib-y += ../../src/libbenchmark_topology_node/libbenchmark_topology_node_cleanup.o
+lib-y += ../../src/libbenchmark_topology_node/libbenchmark_topology_node_compare.o
+lib-y += ../../src/libbenchmark_topology_node/libbenchmark_topology_node_init.o
+
+libs-y := ../../bin/
+
+ccflags-y := -I$(src)/../../inc
+ccflags-y += -I$(src)/../../inc/liblfds710
+ccflags-y += -DKERNEL_MODE
+ccflags-y += -DNDEBUG
+ccflags-y += -fno-strict-aliasing
+ccflags-y += -std=gnu89
+ccflags-y += -Wall
+ccflags-y += -Werror
+ccflags-y += -Wno-unknown-pragmas
+
--- /dev/null
+default:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD)
+
+clean:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) clean
+ find ../../src/ -name "*.o" -type f -delete
+
+help:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) help
+
+modules:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) modules
+
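+# TRD : usage sketch only - assumes the build tree for the running kernel is
+#       installed at /lib/modules/`uname -r`/build; run "make" from this
+#       directory to invoke the kernel build system, "make clean" additionally
+#       deletes any stray .o files under ../../src/
+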
+
--- /dev/null
+Good filename, eh? :-)
+
+The build is broken in two ways.
+
+Firstly, the porting abstraction layer is incomplete - the high resolution
+ timers are missing.
+
+Secondly, there is a bug in the liblfds7.0.0 (7.0.0) header file, such that
+ it tries to use the wrong platform. This will be fixed in 7.0.1,
+ a bug fix release.
+
--- /dev/null
+EXPORTS
+
+libbenchmark_topology_init = libbenchmark_topology_init
+libbenchmark_topology_cleanup = libbenchmark_topology_cleanup
+libbenchmark_topology_generate_string = libbenchmark_topology_generate_string
+libbenchmark_topology_generate_numa_modes_list = libbenchmark_topology_generate_numa_modes_list
+libbenchmark_topology_generate_deduplicated_logical_processor_sets = libbenchmark_topology_generate_deduplicated_logical_processor_sets
+libbenchmark_topology_iterate_init = libbenchmark_topology_iterate_init
+libbenchmark_topology_iterate = libbenchmark_topology_iterate
+libbenchmark_topology_query = libbenchmark_topology_query
+
+libbenchmark_topology_node_init = libbenchmark_topology_node_init
+libbenchmark_topology_node_cleanup = libbenchmark_topology_node_cleanup
+
+libbenchmark_benchmarksuite_init = libbenchmark_benchmarksuite_init
+libbenchmark_benchmarksuite_cleanup = libbenchmark_benchmarksuite_cleanup
+libbenchmark_benchmarksuite_run = libbenchmark_benchmarksuite_run
+libbenchmark_benchmarksuite_get_list_of_gnuplot_strings = libbenchmark_benchmarksuite_get_list_of_gnuplot_strings
+
+libbenchmark_results_init = libbenchmark_results_init
+libbenchmark_results_get_result = libbenchmark_results_get_result
+libbenchmark_results_cleanup = libbenchmark_results_cleanup
+
+libbenchmark_misc_query = libbenchmark_misc_query
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+INCDIR := ..\..\inc
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := libbenchmark
+LIB_BINARY := $(BINDIR)\$(BINNAME).lib
+DLL_BINARY := $(BINDIR)\$(BINNAME).dll
+SRCDIRS := libbenchmark_benchmarkinstance libbenchmark_benchmarks_btree_au_readn_writen libbenchmark_benchmarkset libbenchmark_benchmarks_freelist_push1_then_pop1 libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1 libbenchmark_benchmarksuite libbenchmark_datastructures_btree_au libbenchmark_datastructures_freelist libbenchmark_datastructures_queue_umm libbenchmark_misc libbenchmark_porting_abstraction_layer libbenchmark_results libbenchmark_threadset libbenchmark_topology libbenchmark_topology_node
+SOURCES := libbenchmark_benchmarkinstance_cleanup.c libbenchmark_benchmarkinstance_init.c libbenchmark_benchmarkinstance_run.c \
+ libbenchmark_benchmarkset_add.c libbenchmark_benchmarkset_cleanup.c libbenchmark_benchmarkset_gnuplot.c libbenchmark_benchmarkset_init.c libbenchmark_benchmarkset_run.c \
+ libbenchmark_benchmarks_btree_au_gcc_spinlock_atomic_readn_writen.c libbenchmark_benchmarks_btree_au_gcc_spinlock_sync_readn_writen.c libbenchmark_benchmarks_btree_au_liblfds700_lockfree_readn_writen.c libbenchmark_benchmarks_btree_au_liblfds710_lockfree_readn_writen.c libbenchmark_benchmarks_btree_au_msvc_spinlock_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_mutex_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_rwlock_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_spinlock_process_private_readn_writen.c libbenchmark_benchmarks_btree_au_pthread_spinlock_process_shared_readn_writen.c libbenchmark_benchmarks_btree_au_windows_critical_section_readn_writen.c libbenchmark_benchmarks_btree_au_windows_mutex_readn_writen.c \
+ libbenchmark_benchmarks_freelist_gcc_spinlock_atomic_push1_then_pop1.c libbenchmark_benchmarks_freelist_gcc_spinlock_sync_push1_then_pop1.c libbenchmark_benchmarks_freelist_liblfds700_lockfree_push1_then_pop1.c libbenchmark_benchmarks_freelist_liblfds710_lockfree_push1_then_pop1.c libbenchmark_benchmarks_freelist_msvc_spinlock_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_mutex_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_rwlock_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_spinlock_process_private_push1_then_pop1.c libbenchmark_benchmarks_freelist_pthread_spinlock_process_shared_push1_then_pop1.c libbenchmark_benchmarks_freelist_windows_critical_section_push1_then_pop1.c libbenchmark_benchmarks_freelist_windows_mutex_push1_then_pop1.c \
+ libbenchmark_benchmarks_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_liblfds700_lockfree_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_liblfds710_lockfree_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_msvc_spinlock_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_mutex_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_rwlock_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_windows_critical_section_enqueue1_dequeue1.c libbenchmark_benchmarks_queue_umm_windows_mutex_enqueue1_dequeue1.c \
+ libbenchmark_benchmarksuite_add.c libbenchmark_benchmarksuite_cleanup.c libbenchmark_benchmarksuite_gnuplot.c libbenchmark_benchmarksuite_init.c libbenchmark_benchmarksuite_run.c \
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic.c libbenchmark_datastructure_btree_au_gcc_spinlock_sync.c libbenchmark_datastructure_btree_au_msvc_spinlock.c libbenchmark_datastructure_btree_au_pthread_mutex.c libbenchmark_datastructure_btree_au_pthread_rwlock.c libbenchmark_datastructure_btree_au_pthread_spinlock_process_private.c libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared.c libbenchmark_datastructure_btree_au_windows_critical_section.c libbenchmark_datastructure_btree_au_windows_mutex.c \
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic.c libbenchmark_datastructure_freelist_gcc_spinlock_sync.c libbenchmark_datastructure_freelist_msvc_spinlock.c libbenchmark_datastructure_freelist_pthread_mutex.c libbenchmark_datastructure_freelist_pthread_rwlock.c libbenchmark_datastructure_freelist_pthread_spinlock_process_private.c libbenchmark_datastructure_freelist_pthread_spinlock_process_shared.c libbenchmark_datastructure_freelist_windows_critical_section.c libbenchmark_datastructure_freelist_windows_mutex.c \
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic.c libbenchmark_datastructure_queue_umm_gcc_spinlock_sync.c libbenchmark_datastructure_queue_umm_msvc_spinlock.c libbenchmark_datastructure_queue_umm_pthread_mutex.c libbenchmark_datastructure_queue_umm_pthread_rwlock.c libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private.c libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared.c libbenchmark_datastructure_queue_umm_windows_critical_section.c libbenchmark_datastructure_queue_umm_windows_mutex.c \
+ libbenchmark_misc_globals.c libbenchmark_misc_pal_helpers.c libbenchmark_misc_query.c \
+ libbenchmark_porting_abstraction_layer_populate_topology.c libbenchmark_porting_abstraction_layer_print_string.c \
+ libbenchmark_results_cleanup.c libbenchmark_results_compare.c libbenchmark_results_get_result.c libbenchmark_results_init.c libbenchmark_results_put_result.c \
+ libbenchmark_threadset_cleanup.c libbenchmark_threadset_init.c libbenchmark_threadset_operations.c \
+ libbenchmark_topology_cleanup.c libbenchmark_topology_compare.c libbenchmark_topology_init.c libbenchmark_topology_insert.c libbenchmark_topology_iterate.c libbenchmark_topology_lpsets.c libbenchmark_topology_numa.c libbenchmark_topology_query.c libbenchmark_topology_string.c \
+ libbenchmark_topology_node_cleanup.c libbenchmark_topology_node_compare.c libbenchmark_topology_node_init.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
+USRLIBS := ..\..\..\..\..\liblfds7.0.0\liblfds700\bin\liblfds700.lib ..\..\..\..\liblfds710\bin\liblfds710.lib ..\..\..\libshared\bin\libshared.lib
+
+##### default paths fix up #####
+INCDIRS := $(patsubst %,%;,$(INCDIR))
+INCLUDE += $(subst $(SPACE),,$(INCDIRS))
+
+##### tools #####
+CC := cl
+CFLAGS_MANDATORY := /c "/Fd$(BINDIR)\$(BINNAME).pdb" /wd 4068
+CFLAGS_OPTIONAL := /DWIN32_LEAN_AND_MEAN /DUNICODE /D_UNICODE /nologo /W4 /WX
+CFLAGS_MANDATORY_DBG := /Od /Gm /Zi /D_DEBUG
+CFLAGS_MANDATORY_REL := /Ox /DNDEBUG
+
+AR := lib
+ARFLAGS :=
+ARFLAGS_MANDATORY := /subsystem:console
+ARFLAGS_OPTIONAL := /nologo /wx /verbose
+
+LD := link
+LDFLAGS_MANDATORY := /def:$(BINNAME).def /dll /nodefaultlib /subsystem:console
+LDFLAGS_OPTIONAL := /nologo /nxcompat /wx
+LDFLAGS_MANDATORY_DBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+LDFLAGS_MANDATORY_REL := /incremental:no
+
+##### variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug lib
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MT
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MDd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := msvcrtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MD
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := msvcrt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+dllrel : $(DLL_BINARY)
+dlldbg : $(DLL_BINARY)
+
+librel : $(LIB_BINARY)
+libdbg : $(LIB_BINARY)
+
+##### notes #####
+# /wd 4068 : turn off "unknown pragma" warning
+
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "libbenchmark_internal.h"
+
+
+
+
+
+/****************************************************************************/
+DRIVER_INITIALIZE DriverEntry;
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return STATUS_SUCCESS;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+EXPORTS
+
+libbenchmark_topology_init = libbenchmark_topology_init
+libbenchmark_topology_cleanup = libbenchmark_topology_cleanup
+libbenchmark_topology_generate_string = libbenchmark_topology_generate_string
+libbenchmark_topology_generate_numa_modes_list = libbenchmark_topology_generate_numa_modes_list
+libbenchmark_topology_generate_deduplicated_logical_processor_sets = libbenchmark_topology_generate_deduplicated_logical_processor_sets
+libbenchmark_topology_iterate_init = libbenchmark_topology_iterate_init
+libbenchmark_topology_iterate = libbenchmark_topology_iterate
+libbenchmark_topology_query = libbenchmark_topology_query
+
+libbenchmark_topology_node_init = libbenchmark_topology_node_init
+libbenchmark_topology_node_cleanup = libbenchmark_topology_node_cleanup
+
+libbenchmark_benchmarksuite_init = libbenchmark_benchmarksuite_init
+libbenchmark_benchmarksuite_cleanup = libbenchmark_benchmarksuite_cleanup
+libbenchmark_benchmarksuite_run = libbenchmark_benchmarksuite_run
+libbenchmark_benchmarksuite_get_list_of_gnuplot_strings = libbenchmark_benchmarksuite_get_list_of_gnuplot_strings
+
+libbenchmark_results_init = libbenchmark_results_init
+libbenchmark_results_get_result = libbenchmark_results_get_result
+libbenchmark_results_cleanup = libbenchmark_results_cleanup
+
+libbenchmark_misc_query = libbenchmark_misc_query
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!)
+
+To deal with these limitations, it is necessary for a Windows kernel
+build to run a batch file prior to building.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+They are both idempotent; you can run them as often as you like and
+switch between them as often as you want. It's all fine; whenever
+you run one of them, it will take you from whatever state you were
+previously in, into the state you want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file will then copy "/sources.static" into
+"/src/single_dir_for_windows_kernel/", which will cause a static
+library to be built.
+
+The dynamic library batch file will then copy "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic
+library to be built. It will also copy "src/driver_entry.c" into
+"/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even
+though it's not used.
+
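+As an illustration only - the shipped "sources.static" and "sources.dynamic"
+files are authoritative - the string which differs between the two is
+expected to be the TARGETTYPE entry in the WDK "sources" file, along the
+lines of;
+
+ TARGETTYPE = LIBRARY (static library)
+ TARGETTYPE = EXPORT_DRIVER (dynamic library, which also names a DLLDEF)
+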
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\libbenchmark_benchmarkinstance\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarks_btree_au_readn_writen\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarkset\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarks_freelist_push1_then_pop1\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarksuite\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_datastructures_btree_au\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_datastructures_freelist\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_datastructures_queue_umm\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_porting_abstraction_layer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_results\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_threadset\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_topology\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_topology_node\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\libbenchmark_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y driver_entry_renamed_to_avoid_compiler_warning.c single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+copy /y sources.dynamic single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\libbenchmark_benchmarkinstance\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarks_btree_au_readn_writen\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarkset\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarks_freelist_push1_then_pop1\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_benchmarksuite\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_datastructures_btree_au\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_datastructures_freelist\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_datastructures_queue_umm\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_porting_abstraction_layer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_results\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_threadset\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_topology\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libbenchmark_topology_node\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\libbenchmark_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y sources.static single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+DLLDEF = ../libbenchmark.def
+TARGETNAME = libbenchmark
+TARGETPATH = ../../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = libbenchmark_benchmarkinstance_cleanup.c \
+ libbenchmark_benchmarkinstance_init.c \
+ libbenchmark_benchmarkinstance_run.c \
+ libbenchmark_benchmarkset_add.c \
+ libbenchmark_benchmarkset_cleanup.c \
+ libbenchmark_benchmarkset_gnuplot.c \
+ libbenchmark_benchmarkset_init.c \
+ libbenchmark_benchmarkset_run.c \
+ libbenchmark_benchmarks_btree_au_gcc_spinlock_atomic_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_gcc_spinlock_sync_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_liblfds700_lockfree_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_liblfds710_lockfree_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_msvc_spinlock_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_mutex_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_rwlock_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_spinlock_process_private_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_spinlock_process_shared_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_windows_critical_section_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_windows_mutex_readn_writen.c \
+ libbenchmark_benchmarks_freelist_gcc_spinlock_atomic_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_gcc_spinlock_sync_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_liblfds700_lockfree_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_liblfds710_lockfree_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_msvc_spinlock_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_mutex_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_rwlock_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_spinlock_process_private_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_spinlock_process_shared_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_windows_critical_section_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_windows_mutex_push1_then_pop1.c \
+ libbenchmark_benchmarks_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_liblfds700_lockfree_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_liblfds710_lockfree_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_msvc_spinlock_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_mutex_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_rwlock_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_windows_critical_section_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_windows_mutex_enqueue1_dequeue1.c \
+ libbenchmark_benchmarksuite_add.c \
+ libbenchmark_benchmarksuite_cleanup.c \
+ libbenchmark_benchmarksuite_gnuplot.c \
+ libbenchmark_benchmarksuite_init.c \
+ libbenchmark_benchmarksuite_run.c \
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic.c \
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync.c \
+ libbenchmark_datastructure_btree_au_msvc_spinlock.c \
+ libbenchmark_datastructure_btree_au_pthread_mutex.c \
+ libbenchmark_datastructure_btree_au_pthread_rwlock.c \
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private.c \
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared.c \
+ libbenchmark_datastructure_btree_au_windows_critical_section.c \
+ libbenchmark_datastructure_btree_au_windows_mutex.c \
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic.c \
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync.c \
+ libbenchmark_datastructure_freelist_msvc_spinlock.c \
+ libbenchmark_datastructure_freelist_pthread_mutex.c \
+ libbenchmark_datastructure_freelist_pthread_rwlock.c \
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private.c \
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared.c \
+ libbenchmark_datastructure_freelist_windows_critical_section.c \
+ libbenchmark_datastructure_freelist_windows_mutex.c \
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic.c \
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync.c \
+ libbenchmark_datastructure_queue_umm_msvc_spinlock.c \
+ libbenchmark_datastructure_queue_umm_pthread_mutex.c \
+ libbenchmark_datastructure_queue_umm_pthread_rwlock.c \
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private.c \
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared.c \
+ libbenchmark_datastructure_queue_umm_windows_critical_section.c \
+ libbenchmark_datastructure_queue_umm_windows_mutex.c \
+ libbenchmark_misc_globals.c \
+ libbenchmark_misc_pal_helpers.c \
+ libbenchmark_misc_query.c \
+ libbenchmark_porting_abstraction_layer_populate_topology.c \
+ libbenchmark_porting_abstraction_layer_print_string.c \
+ libbenchmark_results_cleanup.c \
+ libbenchmark_results_compare.c \
+ libbenchmark_results_get_result.c \
+ libbenchmark_results_init.c \
+ libbenchmark_results_put_result.c \
+ libbenchmark_threadset_cleanup.c \
+ libbenchmark_threadset_init.c \
+ libbenchmark_threadset_operations.c \
+ libbenchmark_topology_cleanup.c \
+ libbenchmark_topology_compare.c \
+ libbenchmark_topology_init.c \
+ libbenchmark_topology_insert.c \
+ libbenchmark_topology_iterate.c \
+ libbenchmark_topology_lpsets.c \
+ libbenchmark_topology_numa.c \
+ libbenchmark_topology_query.c \
+ libbenchmark_topology_string.c \
+ libbenchmark_topology_node_cleanup.c \
+ libbenchmark_topology_node_compare.c \
+ libbenchmark_topology_node_init.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+TARGETNAME = libbenchmark
+TARGETPATH = ../../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = libbenchmark_benchmarkinstance_cleanup.c \
+ libbenchmark_benchmarkinstance_init.c \
+ libbenchmark_benchmarkinstance_run.c \
+ libbenchmark_benchmarkset_add.c \
+ libbenchmark_benchmarkset_cleanup.c \
+ libbenchmark_benchmarkset_gnuplot.c \
+ libbenchmark_benchmarkset_init.c \
+ libbenchmark_benchmarkset_run.c \
+ libbenchmark_benchmarks_btree_au_gcc_spinlock_atomic_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_gcc_spinlock_sync_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_liblfds700_lockfree_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_liblfds710_lockfree_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_msvc_spinlock_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_mutex_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_rwlock_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_spinlock_process_private_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_pthread_spinlock_process_shared_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_windows_critical_section_readn_writen.c \
+ libbenchmark_benchmarks_btree_au_windows_mutex_readn_writen.c \
+ libbenchmark_benchmarks_freelist_gcc_spinlock_atomic_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_gcc_spinlock_sync_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_liblfds700_lockfree_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_liblfds710_lockfree_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_msvc_spinlock_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_mutex_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_rwlock_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_spinlock_process_private_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_pthread_spinlock_process_shared_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_windows_critical_section_push1_then_pop1.c \
+ libbenchmark_benchmarks_freelist_windows_mutex_push1_then_pop1.c \
+ libbenchmark_benchmarks_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_liblfds700_lockfree_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_liblfds710_lockfree_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_msvc_spinlock_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_mutex_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_rwlock_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_windows_critical_section_enqueue1_dequeue1.c \
+ libbenchmark_benchmarks_queue_umm_windows_mutex_enqueue1_dequeue1.c \
+ libbenchmark_benchmarksuite_add.c \
+ libbenchmark_benchmarksuite_cleanup.c \
+ libbenchmark_benchmarksuite_gnuplot.c \
+ libbenchmark_benchmarksuite_init.c \
+ libbenchmark_benchmarksuite_run.c \
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic.c \
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync.c \
+ libbenchmark_datastructure_btree_au_msvc_spinlock.c \
+ libbenchmark_datastructure_btree_au_pthread_mutex.c \
+ libbenchmark_datastructure_btree_au_pthread_rwlock.c \
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private.c \
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared.c \
+ libbenchmark_datastructure_btree_au_windows_critical_section.c \
+ libbenchmark_datastructure_btree_au_windows_mutex.c \
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic.c \
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync.c \
+ libbenchmark_datastructure_freelist_msvc_spinlock.c \
+ libbenchmark_datastructure_freelist_pthread_mutex.c \
+ libbenchmark_datastructure_freelist_pthread_rwlock.c \
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private.c \
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared.c \
+ libbenchmark_datastructure_freelist_windows_critical_section.c \
+ libbenchmark_datastructure_freelist_windows_mutex.c \
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic.c \
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync.c \
+ libbenchmark_datastructure_queue_umm_msvc_spinlock.c \
+ libbenchmark_datastructure_queue_umm_pthread_mutex.c \
+ libbenchmark_datastructure_queue_umm_pthread_rwlock.c \
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private.c \
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared.c \
+ libbenchmark_datastructure_queue_umm_windows_critical_section.c \
+ libbenchmark_datastructure_queue_umm_windows_mutex.c \
+ libbenchmark_misc_globals.c \
+ libbenchmark_misc_pal_helpers.c \
+ libbenchmark_misc_query.c \
+ libbenchmark_porting_abstraction_layer_populate_topology.c \
+ libbenchmark_porting_abstraction_layer_print_string.c \
+ libbenchmark_results_cleanup.c \
+ libbenchmark_results_compare.c \
+ libbenchmark_results_get_result.c \
+ libbenchmark_results_init.c \
+ libbenchmark_results_put_result.c \
+ libbenchmark_threadset_cleanup.c \
+ libbenchmark_threadset_init.c \
+ libbenchmark_threadset_operations.c \
+ libbenchmark_topology_cleanup.c \
+ libbenchmark_topology_compare.c \
+ libbenchmark_topology_init.c \
+ libbenchmark_topology_insert.c \
+ libbenchmark_topology_iterate.c \
+ libbenchmark_topology_lpsets.c \
+ libbenchmark_topology_numa.c \
+ libbenchmark_topology_query.c \
+ libbenchmark_topology_string.c \
+ libbenchmark_topology_node_cleanup.c \
+ libbenchmark_topology_node_compare.c \
+ libbenchmark_topology_node_init.c
+
--- /dev/null
+#ifndef LIBBENCHMARK_H
+
+ /***** defines *****/
+ #define LIBBENCHMARK_H
+
+ /***** platform includes *****/
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_operating_system.h"
+
+ /***** external includes *****/
+ #include "../../../liblfds710/inc/liblfds710.h"
+ #include "../../libshared/inc/libshared.h"
+ #include "../../../../liblfds7.0.0/liblfds700/inc/liblfds700.h"
+
+ /***** pragmas on *****/
+ // TRD : the ditzy 7.0.0 header doesn't use push
+ #pragma warning( push )
+ #pragma warning( disable : 4324 )
+
+ /***** includes *****/
+ #include "libbenchmark/libbenchmark_topology_node.h"
+ #include "libbenchmark/libbenchmark_topology.h"
+
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_gcc_spinlock_atomic.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_gcc_spinlock_sync.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_msvc_spinlock.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_pthread_mutex.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_pthread_rwlock.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_pthread_spinlock_process_private.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_pthread_spinlock_process_shared.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_windows_critical_section.h"
+ #include "libbenchmark/libbenchmark_porting_abstraction_layer_lock_windows_mutex.h"
+
+ #include "libbenchmark/libbenchmark_enums.h"
+ #include "libbenchmark/libbenchmark_gnuplot.h"
+ #include "libbenchmark/libbenchmark_results.h"
+ #include "libbenchmark/libbenchmark_threadset.h"
+ #include "libbenchmark/libbenchmark_benchmarks_btree_au_readn_writen.h"
+ #include "libbenchmark/libbenchmark_benchmarks_freelist_push1_then_pop1.h"
+ #include "libbenchmark/libbenchmark_benchmarks_queue_umm_enqueue1_then_dequeue1.h"
+ #include "libbenchmark/libbenchmark_benchmarkinstance.h"
+ #include "libbenchmark/libbenchmark_benchmarkset.h"
+ #include "libbenchmark/libbenchmark_benchmarksuite.h"
+ #include "libbenchmark/libbenchmark_prng.h"
+
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_gcc_spinlock_atomic.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_gcc_spinlock_sync.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_msvc_spinlock.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_pthread_mutex.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_pthread_rwlock.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_pthread_spinlock_process_private.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_windows_critical_section.h"
+ #include "libbenchmark/libbenchmark_datastructure_btree_au_windows_mutex.h"
+
+ #include "libbenchmark/libbenchmark_datastructure_freelist_gcc_spinlock_atomic.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_gcc_spinlock_sync.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_msvc_spinlock.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_pthread_mutex.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_pthread_rwlock.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_pthread_spinlock_process_private.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_pthread_spinlock_process_shared.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_windows_critical_section.h"
+ #include "libbenchmark/libbenchmark_datastructure_freelist_windows_mutex.h"
+
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_gcc_spinlock_sync.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_msvc_spinlock.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_pthread_mutex.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_pthread_rwlock.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_windows_critical_section.h"
+ #include "libbenchmark/libbenchmark_datastructure_queue_umm_windows_mutex.h"
+
+ #include "libbenchmark/libbenchmark_misc.h"
+
+ /***** pragmas off *****/
+ #pragma warning( pop )
+
+#endif
+
--- /dev/null
+/***** enums *****/
+
+/***** structs *****/
+struct libbenchmark_benchmarkinstance_state
+{
+ enum flag
+ numa_awareness_flag;
+
+ enum libbenchmark_datastructure_id
+ datastructure_id;
+
+ enum libbenchmark_benchmark_id
+ benchmark_id;
+
+ enum libbenchmark_lock_id
+ lock_id;
+
+ struct libbenchmark_threadset_state
+ tsets;
+
+ struct libbenchmark_topology_state
+ *ts;
+
+ struct lfds710_list_asu_element
+ lasue;
+
+ void
+ (*init_function)( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_node,
+ struct libbenchmark_threadset_state *tsets ),
+ (*cleanup_function)( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_node,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets );
+};
+
+/***** public prototypes *****/
+void libbenchmark_benchmarkinstance_init( struct libbenchmark_benchmarkinstance_state *bs,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ enum libbenchmark_lock_id lock_id,
+ struct libbenchmark_topology_state *ts,
+ void (*init_function)( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_node,
+ struct libbenchmark_threadset_state *tsets ),
+ void (*cleanup_function)( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_node,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets ) );
+
+void libbenchmark_benchmarkinstance_cleanup( struct libbenchmark_benchmarkinstance_state *bs );
+
+void libbenchmark_benchmarkinstance_run( struct libbenchmark_benchmarkinstance_state *bs,
+ struct lfds710_list_aso_state *lpset,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libshared_memory_state *ms,
+ struct libbenchmark_results_state *rs );
+
--- /dev/null
+/***** public prototypes *****/
+void libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_thread( void *libbenchmark_threadset_per_thread_state );
+
+void libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+
--- /dev/null
+/***** public prototypes *****/
+void libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state );
+
+void libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+
--- /dev/null
+/***** public prototypes *****/
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts, struct lfds710_list_aso_state *logical_processor_set, struct libshared_memory_state *ms, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_threadset_state *tsets );
+
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+libshared_pal_thread_return_t libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state );
+
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+void libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set, enum libbenchmark_topology_numa_mode numa_node, struct libbenchmark_results_state *rs, struct libbenchmark_threadset_state *tsets );
+
--- /dev/null
+/***** defines *****/
+
+/***** enums *****/
+
+/***** structs *****/
+struct libbenchmark_benchmarkset_state
+{
+ enum libbenchmark_datastructure_id
+ datastructure_id;
+
+ enum libbenchmark_benchmark_id
+ benchmark_id;
+
+ struct lfds710_list_asu_element
+ lasue;
+
+ struct lfds710_list_asu_state
+ benchmarks,
+ *logical_processor_sets,
+ *numa_modes_list;
+
+ struct libshared_memory_state
+ *ms;
+
+ struct libbenchmark_topology_state
+ *ts;
+};
+
+struct libbenchmark_benchmarkset_gnuplot
+{
+ char
+ filename[256],
+ *gnuplot_string;
+
+ enum libbenchmark_benchmark_id
+ benchmark_id;
+
+ enum libbenchmark_datastructure_id
+ datastructure_id;
+
+ struct lfds710_list_asu_element
+ lasue;
+};
+
+/***** public prototypes *****/
+void libbenchmark_benchmarkset_init( struct libbenchmark_benchmarkset_state *bsets,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ struct lfds710_list_asu_state *logical_processor_sets,
+ struct lfds710_list_asu_state *numa_modes_list,
+ struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms );
+
+void libbenchmark_benchmarkset_cleanup( struct libbenchmark_benchmarkset_state *bsets );
+
+void libbenchmark_benchmarkset_add_benchmark( struct libbenchmark_benchmarkset_state *bsets, struct libbenchmark_benchmarkinstance_state *bs );
+
+void libbenchmark_benchmarkset_run( struct libbenchmark_benchmarkset_state *bsets, struct libbenchmark_results_state *rs );
+
+void libbenchmark_benchmarkset_gnuplot_emit( struct libbenchmark_benchmarkset_state *bsets,
+ struct libbenchmark_results_state *rs,
+ char *gnuplot_system_string,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_gnuplot_options *gpo,
+ struct libbenchmark_benchmarkset_gnuplot *bg );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_BENCHMARKSUITE_OPTION_DURATION 0x1
+
+/***** enums *****/
+
+/***** structs *****/
+struct libbenchmark_benchmarksuite_state
+{
+ struct lfds710_list_asu_state
+ benchmarksets,
+ lpsets,
+ numa_modes_list;
+
+ struct libshared_memory_state
+ *ms;
+
+ struct libbenchmark_topology_state
+ *ts;
+};
+
+/***** public prototypes *****/
+void libbenchmark_benchmarksuite_init( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ lfds710_pal_uint_t options_bitmask,
+ lfds710_pal_uint_t benchmark_duration_in_seconds );
+
+void libbenchmark_benchmarksuite_cleanup( struct libbenchmark_benchmarksuite_state *bss );
+
+void libbenchmark_benchmarksuite_add_benchmarkset( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_benchmarkset_state *bsets );
+
+void libbenchmark_benchmarksuite_run( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_results_state *rs );
+
+void libbenchmark_benchmarksuite_get_list_of_gnuplot_strings( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_results_state *rs,
+ char *gnuplot_system_string,
+ struct libbenchmark_gnuplot_options *gpo,
+ struct lfds710_list_asu_state *list_of_gnuplot_strings );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+{
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state
+{
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *root;
+
+ pal_lock_gcc_spinlock_atomic_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_init( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_cleanup( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert_result libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element *baue,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_key( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+{
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state
+{
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *root;
+
+ pal_lock_gcc_spinlock_sync_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_gcc_spinlock_sync_init( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_gcc_spinlock_sync_cleanup( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert_result libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element *baue,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_key( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue,
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_relative_position relative_position );
+
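+/* Usage sketch (added for illustration, not part of the original header) : a minimal,
+   assumed single-threaded example of the btree_au gcc_spinlock_sync API declared above.
+   The compare_uint_keys function, the literal keys/values and the return-value handling
+   are hypothetical; only the types, enums, macros and prototypes come from this header.
+
+   static int compare_uint_keys( void const *new_key, void const *existing_key )
+   {
+     lfds710_pal_uint_t nk = (lfds710_pal_uint_t) new_key,
+                        ek = (lfds710_pal_uint_t) existing_key;
+
+     if( nk < ek ) return( -1 );
+     if( nk > ek ) return( 1 );
+     return( 0 );
+   }
+
+   void example_btree_au_usage( void )
+   {
+     struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state baus;
+     struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element baue, *existing_baue, *result;
+     void *value;
+
+     libbenchmark_datastructure_btree_au_gcc_spinlock_sync_init( &baus, compare_uint_keys,
+       LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_FAIL, NULL );
+
+     LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( baus, baue, 5 );
+     LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( baus, baue, 500 );
+
+     libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert( &baus, &baue, &existing_baue );
+
+     // the return value is assumed to be non-zero when the key is found
+     if( libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_key(&baus, compare_uint_keys, (void *) (lfds710_pal_uint_t) 5, &result) )
+       LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_GET_VALUE_FROM_ELEMENT( baus, *result, value );
+
+     libbenchmark_datastructure_btree_au_gcc_spinlock_sync_cleanup( &baus, NULL );
+   }
+*/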
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_msvc_spinlock_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_msvc_spinlock_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_msvc_spinlock_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_msvc_spinlock_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+{
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_msvc_spinlock_state
+{
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *root;
+
+ pal_lock_msvc_spinlock_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_msvc_spinlock_init( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_msvc_spinlock_cleanup( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_msvc_spinlock_insert_result libbenchmark_datastructure_btree_au_msvc_spinlock_insert( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element *baue,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_key( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue );
+
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue,
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue,
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue,
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_pthread_mutex_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_mutex_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_mutex_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_mutex_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+{
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_pthread_mutex_state
+{
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *root;
+
+ pal_lock_pthread_mutex_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_pthread_mutex_init( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_pthread_mutex_cleanup( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_pthread_mutex_insert_result libbenchmark_datastructure_btree_au_pthread_mutex_insert( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_key( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue );
+
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_READ((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_pthread_rwlock_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_rwlock_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_rwlock_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_rwlock_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+{
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_pthread_rwlock_state
+{
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *root;
+
+ pal_lock_pthread_rwlock_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_pthread_rwlock_init( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_pthread_rwlock_cleanup( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_pthread_rwlock_insert_result libbenchmark_datastructure_btree_au_pthread_rwlock_insert( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue );
+
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_write( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue );
+
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_and_then_by_relative_position_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_relative_position relative_position );
+
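+/* Note (added for illustration) : unlike the other lock variants, the rwlock version splits the
+   getters into _for_read and _for_write forms, presumably so the benchmark can take the pthread
+   rwlock in the matching mode while it walks the tree.  A hedged sketch of how the pair might be
+   used - the key literal and compare_keys function are hypothetical:
+
+     // lookup whose caller will only read the found element
+     libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_read( &baus, compare_keys, (void *) (lfds710_pal_uint_t) 5, &baue );
+
+     // lookup whose caller intends to modify the found element afterwards
+     libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_write( &baus, compare_keys, (void *) (lfds710_pal_uint_t) 5, &baue );
+*/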
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+{
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state
+{
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *root;
+
+ pal_lock_pthread_spinlock_process_private_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert_result libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_key( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+{
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state
+{
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *root;
+
+ pal_lock_pthread_spinlock_process_shared_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_init( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_cleanup( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert_result libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_key( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue,
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_windows_critical_section_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_windows_critical_section_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_windows_critical_section_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_windows_critical_section_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+{
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_windows_critical_section_state
+{
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *root;
+
+ pal_lock_windows_critical_section_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_windows_critical_section_init( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_windows_critical_section_cleanup( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_windows_critical_section_insert_result libbenchmark_datastructure_btree_au_windows_critical_section_insert( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element *baue,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_key( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue );
+
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue,
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue,
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue,
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_GET_KEY_FROM_ELEMENT( btree_au_state, btree_au_element ) ( (btree_au_element).key )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_KEY_IN_ELEMENT( btree_au_state, btree_au_element, new_key ) ( (btree_au_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_GET_VALUE_FROM_ELEMENT( btree_au_state, btree_au_element, existing_value ) { LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET((btree_au_state).lock); (existing_value) = (btree_au_element).value; LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( btree_au_state, btree_au_element, new_value ) { LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET((btree_au_state).lock); (btree_au_element).value = (void *) (lfds710_pal_uint_t) (new_value); LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE((btree_au_state).lock); }
+#define LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_GET_USER_STATE_FROM_STATE( btree_au_state ) ( (btree_au_state).user_state )
+
+/***** enums *****/
+enum libbenchmark_datastructure_btree_au_windows_mutex_absolute_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_ROOT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_LARGEST_IN_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_windows_mutex_existing_key
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_FAIL
+};
+
+enum libbenchmark_datastructure_btree_au_windows_mutex_insert_result
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_SUCCESS_OVERWRITE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_SUCCESS
+};
+
+enum libbenchmark_datastructure_btree_au_windows_mutex_relative_position
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_UP,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_LEFT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_RIGHT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE
+};
+
+/***** structs *****/
+struct libbenchmark_datastructure_btree_au_windows_mutex_element
+{
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *left,
+ *right,
+ *up;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_btree_au_windows_mutex_state
+{
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *root;
+
+ pal_lock_windows_mutex_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ int
+ (*key_compare_function)( void const *new_key, void const *existing_key );
+
+ enum libbenchmark_datastructure_btree_au_windows_mutex_existing_key
+ existing_key;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_btree_au_windows_mutex_init( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_windows_mutex_existing_key existing_key,
+ void *user_state );
+
+void libbenchmark_datastructure_btree_au_windows_mutex_cleanup( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element *baue) );
+
+enum libbenchmark_datastructure_btree_au_windows_mutex_insert_result libbenchmark_datastructure_btree_au_windows_mutex_insert( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element *baue,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **existing_baue );
+
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_key( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue );
+
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue,
+ enum libbenchmark_datastructure_btree_au_windows_mutex_absolute_position absolute_position,
+ enum libbenchmark_datastructure_btree_au_windows_mutex_relative_position relative_position );
+
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue,
+ enum libbenchmark_datastructure_btree_au_windows_mutex_absolute_position absolute_position );
+
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue,
+ enum libbenchmark_datastructure_btree_au_windows_mutex_relative_position relative_position );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element
+ *top;
+
+ pal_lock_gcc_spinlock_atomic_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_gcc_spinlock_atomic_init( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_gcc_spinlock_atomic_cleanup( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element *fe );
+int libbenchmark_datastructure_freelist_gcc_spinlock_atomic_pop( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element **fe );
+
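+/* Usage sketch (added for illustration, not part of the original header) : a minimal, assumed
+   single-threaded push/pop round trip with the gcc_spinlock_atomic freelist declared above.
+   The value literal and the pop return-value handling are assumptions; the types, macros and
+   prototypes come from this header.
+
+   void example_freelist_usage( void )
+   {
+     struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state fs;
+     struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element fe, *popped_fe;
+
+     libbenchmark_datastructure_freelist_gcc_spinlock_atomic_init( &fs, NULL );
+
+     LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( fe, 42 );
+     libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( &fs, &fe );
+
+     // the return value is assumed to be non-zero when an element was available
+     if( libbenchmark_datastructure_freelist_gcc_spinlock_atomic_pop(&fs, &popped_fe) )
+       (void) LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_ATOMIC_GET_VALUE_FROM_ELEMENT( *popped_fe );
+
+     libbenchmark_datastructure_freelist_gcc_spinlock_atomic_cleanup( &fs, NULL );
+   }
+*/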
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_SYNC_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_SYNC_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_GCC_SPINLOCK_SYNC_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element
+ *top;
+
+ pal_lock_gcc_spinlock_sync_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_gcc_spinlock_sync_init( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_gcc_spinlock_sync_cleanup( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_gcc_spinlock_sync_push( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element *fe );
+int libbenchmark_datastructure_freelist_gcc_spinlock_sync_pop( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_MSVC_SPINLOCK_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_MSVC_SPINLOCK_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_MSVC_SPINLOCK_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_MSVC_SPINLOCK_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_msvc_spinlock_state
+{
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_element
+ *top;
+
+ pal_lock_msvc_spinlock_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_msvc_spinlock_element
+{
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_msvc_spinlock_init( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_msvc_spinlock_cleanup( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, struct libbenchmark_datastructure_freelist_msvc_spinlock_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_msvc_spinlock_push( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, struct libbenchmark_datastructure_freelist_msvc_spinlock_element *fe );
+int libbenchmark_datastructure_freelist_msvc_spinlock_pop( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, struct libbenchmark_datastructure_freelist_msvc_spinlock_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_pthread_mutex_state
+{
+ struct libbenchmark_datastructure_freelist_pthread_mutex_element
+ *top;
+
+ pal_lock_pthread_mutex_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_pthread_mutex_element
+{
+ struct libbenchmark_datastructure_freelist_pthread_mutex_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_pthread_mutex_init( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_pthread_mutex_cleanup( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, struct libbenchmark_datastructure_freelist_pthread_mutex_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_pthread_mutex_push( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, struct libbenchmark_datastructure_freelist_pthread_mutex_element *fe );
+int libbenchmark_datastructure_freelist_pthread_mutex_pop( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, struct libbenchmark_datastructure_freelist_pthread_mutex_element **fe );
+
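+/* a minimal usage sketch, assuming pop() returns 1 on success and 0 when the
+ freelist is empty, and that cleanup() accepts a NULL callback when no
+ per-element work is needed - names beginning "my_" are hypothetical
+
+ struct libbenchmark_datastructure_freelist_pthread_mutex_state
+ my_fs;
+
+ struct libbenchmark_datastructure_freelist_pthread_mutex_element
+ my_fe,
+ *my_popped;
+
+ void
+ *my_value;
+
+ libbenchmark_datastructure_freelist_pthread_mutex_init( &my_fs, NULL );
+
+ LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( my_fe, 42 );
+ libbenchmark_datastructure_freelist_pthread_mutex_push( &my_fs, &my_fe );
+
+ if( libbenchmark_datastructure_freelist_pthread_mutex_pop(&my_fs, &my_popped) )
+ my_value = LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_MUTEX_GET_VALUE_FROM_ELEMENT( *my_popped );
+
+ libbenchmark_datastructure_freelist_pthread_mutex_cleanup( &my_fs, NULL );
+*/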
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_RWLOCK_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_RWLOCK_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_RWLOCK_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_RWLOCK_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_pthread_rwlock_state
+{
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_element
+ *top;
+
+ pal_lock_pthread_rwlock_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_pthread_rwlock_element
+{
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_pthread_rwlock_init( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_pthread_rwlock_cleanup( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, struct libbenchmark_datastructure_freelist_pthread_rwlock_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_pthread_rwlock_push( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, struct libbenchmark_datastructure_freelist_pthread_rwlock_element *fe );
+int libbenchmark_datastructure_freelist_pthread_rwlock_pop( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, struct libbenchmark_datastructure_freelist_pthread_rwlock_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element
+ *top;
+
+ pal_lock_pthread_spinlock_process_private_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_private_push( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element *fe );
+int libbenchmark_datastructure_freelist_pthread_spinlock_process_private_pop( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element
+ *top;
+
+ pal_lock_pthread_spinlock_process_shared_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_init( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_cleanup( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_push( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element *fe );
+int libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_pop( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_CRITICAL_SECTION_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_CRITICAL_SECTION_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_CRITICAL_SECTION_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_CRITICAL_SECTION_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_windows_critical_section_state
+{
+ struct libbenchmark_datastructure_freelist_windows_critical_section_element
+ *top;
+
+ pal_lock_windows_critical_section_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_windows_critical_section_element
+{
+ struct libbenchmark_datastructure_freelist_windows_critical_section_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_windows_critical_section_init( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_windows_critical_section_cleanup( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, struct libbenchmark_datastructure_freelist_windows_critical_section_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_windows_critical_section_push( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, struct libbenchmark_datastructure_freelist_windows_critical_section_element *fe );
+int libbenchmark_datastructure_freelist_windows_critical_section_pop( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, struct libbenchmark_datastructure_freelist_windows_critical_section_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_MUTEX_GET_KEY_FROM_ELEMENT( freelist_element ) ( (freelist_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_MUTEX_SET_KEY_IN_ELEMENT( freelist_element, new_key ) ( (freelist_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_MUTEX_GET_VALUE_FROM_ELEMENT( freelist_element ) ( (freelist_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( freelist_element, new_value ) ( (freelist_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_FREELIST_WINDOWS_MUTEX_GET_USER_STATE_FROM_STATE( freelist_state ) ( (freelist_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_freelist_windows_mutex_state
+{
+ struct libbenchmark_datastructure_freelist_windows_mutex_element
+ *top;
+
+ pal_lock_windows_mutex_state
+ lock;
+
+ void
+ *user_state;
+};
+
+struct libbenchmark_datastructure_freelist_windows_mutex_element
+{
+ struct libbenchmark_datastructure_freelist_windows_mutex_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_freelist_windows_mutex_init( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, void *user_state );
+void libbenchmark_datastructure_freelist_windows_mutex_cleanup( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, struct libbenchmark_datastructure_freelist_windows_mutex_element *fe, void *user_state) );
+
+void libbenchmark_datastructure_freelist_windows_mutex_push( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, struct libbenchmark_datastructure_freelist_windows_mutex_element *fe );
+int libbenchmark_datastructure_freelist_windows_mutex_pop( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, struct libbenchmark_datastructure_freelist_windows_mutex_element **fe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element
+{
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_gcc_spinlock_atomic_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_init( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_cleanup( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element *qe );
+int libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_dequeue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element **qe );
+
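+/* a minimal usage sketch; the state structure comment above explains why each end
+ of the queue has its own spinlock and cache line, so an enqueuing thread and a
+ dequeuing thread never contend for the same lock - the sketch assumes init()
+ consumes one caller-supplied dummy element, dequeue_umm() returns 1 on success
+ and 0 when empty, and cleanup() accepts a NULL callback; names beginning "my_"
+ are hypothetical
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state
+ my_qs;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element
+ my_dummy,
+ my_qe,
+ *my_dequeued;
+
+ void
+ *my_value;
+
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_init( &my_qs, &my_dummy, NULL );
+
+ LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( my_qe, 42 );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( &my_qs, &my_qe );
+
+ if( libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_dequeue_umm(&my_qs, &my_dequeued) )
+ my_value = LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_ATOMIC_GET_VALUE_FROM_ELEMENT( *my_dequeued );
+
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_cleanup( &my_qs, NULL );
+*/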
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_SYNC_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_SYNC_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_GCC_SPINLOCK_SYNC_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element
+{
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_gcc_spinlock_sync_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_init( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_cleanup( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_enqueue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element *qe );
+int libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_dequeue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_MSVC_SPINLOCK_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_MSVC_SPINLOCK_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_MSVC_SPINLOCK_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_MSVC_SPINLOCK_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element
+{
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_msvc_spinlock_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_msvc_spinlock_init( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_msvc_spinlock_cleanup( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_msvc_spinlock_enqueue_umm( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element *qe );
+int libbenchmark_datastructure_queue_umm_msvc_spinlock_dequeue_umm( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_MUTEX_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_MUTEX_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_MUTEX_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_MUTEX_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_pthread_mutex_element
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_pthread_mutex_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_pthread_mutex_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_pthread_mutex_init( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_pthread_mutex_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_pthread_mutex_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element *qe );
+int libbenchmark_datastructure_queue_umm_pthread_mutex_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_RWLOCK_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_RWLOCK_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_RWLOCK_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_RWLOCK_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_pthread_rwlock_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_pthread_rwlock_init( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_pthread_rwlock_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_pthread_rwlock_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element *qe );
+int libbenchmark_datastructure_queue_umm_pthread_rwlock_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_pthread_spinlock_process_private_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe );
+int libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_pthread_spinlock_process_shared_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_init( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element *qe );
+int libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_CRITICAL_SECTION_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_CRITICAL_SECTION_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_CRITICAL_SECTION_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_CRITICAL_SECTION_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_windows_critical_section_element
+{
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_windows_critical_section_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_windows_critical_section_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_windows_critical_section_init( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_windows_critical_section_cleanup( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_windows_critical_section_enqueue_umm( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element *qe );
+int libbenchmark_datastructure_queue_umm_windows_critical_section_dequeue_umm( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element **qe );
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_MUTEX_GET_KEY_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).key )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_MUTEX_SET_KEY_IN_ELEMENT( queue_umm_element, new_key ) ( (queue_umm_element).key = (void *) (lfds710_pal_uint_t) (new_key) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_MUTEX_GET_VALUE_FROM_ELEMENT( queue_umm_element ) ( (queue_umm_element).value )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( queue_umm_element, new_value ) ( (queue_umm_element).value = (void *) (lfds710_pal_uint_t) (new_value) )
+#define LIBBENCHMARK_DATA_STRUCTURE_QUEUE_UMM_WINDOWS_MUTEX_GET_USER_STATE_FROM_STATE( queue_umm_state ) ( (queue_umm_state).user_state )
+
+/***** structures *****/
+struct libbenchmark_datastructure_queue_umm_windows_mutex_element
+{
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_element
+ *next;
+
+ void
+ *key,
+ *value;
+};
+
+struct libbenchmark_datastructure_queue_umm_windows_mutex_state
+{
+ /* TRD : the pointers are on separate cache lines, so threads enqueuing do not
+ physically collide with threads dequeuing; this is done so the benchmark
+ is fair to the lock-free code, which does the same
+
+ since we're not using atomic operations, we don't need to be LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES
+ bytes apart (e.g. the ERG on ARM), only cache line length in bytes
+ */
+
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ *enqueue_umm,
+ *dequeue_umm;
+
+ pal_lock_windows_mutex_state LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ lock_enqueue_umm,
+ lock_dequeue_umm;
+
+ void
+ *user_state;
+};
+
+/***** public prototypes *****/
+void libbenchmark_datastructure_queue_umm_windows_mutex_init( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element *qe, void *user_state );
+void libbenchmark_datastructure_queue_umm_windows_mutex_cleanup( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, void (*element_dequeue_umm_callback)(struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element *qe, enum flag dummy_flag) );
+
+void libbenchmark_datastructure_queue_umm_windows_mutex_enqueue_umm( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element *qe );
+int libbenchmark_datastructure_queue_umm_windows_mutex_dequeue_umm( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element **qe );
+
--- /dev/null
+/***** enums *****/
+enum libbenchmark_datastructure_id
+{
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_DATASTRUCTURE_COUNT
+};
+
+enum libbenchmark_benchmark_id
+{
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1
+};
+
+enum libbenchmark_lock_id
+{
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE,
+ LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX
+};
+
+
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_GNUPLOT_OPTIONS_INIT( gpo ) (gpo).y_axis_scale_type = LIBBENCHMARK_GNUPLOT_Y_AXIS_SCALE_TYPE_LINEAR, (gpo).width_in_pixels_set_flag = LOWERED, (gpo).height_in_pixels_set_flag = LOWERED
+#define LIBBENCHMARK_GNUPLOT_OPTIONS_SET_Y_AXIS_SCALE_TYPE_LOGARITHMIC( gpo ) (gpo).y_axis_scale_type = LIBBENCHMARK_GNUPLOT_Y_AXIS_SCALE_TYPE_LOGARITHMIC
+#define LIBBENCHMARK_GNUPLOT_OPTIONS_SET_WIDTH_IN_PIXELS( gpo, wip ) (gpo).width_in_pixels = wip, (gpo).width_in_pixels_set_flag = RAISED
+#define LIBBENCHMARK_GNUPLOT_OPTIONS_SET_HEIGHT_IN_PIXELS( gpo, wip ) (gpo).height_in_pixels = wip, (gpo).height_in_pixels_set_flag = RAISED
+
+/***** enums *****/
+enum libbenchmark_gnuplot_y_axis_scale_type
+{
+ LIBBENCHMARK_GNUPLOT_Y_AXIS_SCALE_TYPE_LINEAR,
+ LIBBENCHMARK_GNUPLOT_Y_AXIS_SCALE_TYPE_LOGARITHMIC
+};
+
+/***** structs *****/
+struct libbenchmark_gnuplot_options
+{
+ enum flag
+ width_in_pixels_set_flag,
+ height_in_pixels_set_flag;
+
+ enum libbenchmark_gnuplot_y_axis_scale_type
+ y_axis_scale_type;
+
+ lfds710_pal_uint_t
+ width_in_pixels,
+ height_in_pixels;
+};
+
+/***** public prototypes *****/
+
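+/* a minimal usage sketch, assuming these macros are the intended way to configure
+ plot output; my_gpo is a hypothetical variable
+
+ struct libbenchmark_gnuplot_options
+ my_gpo;
+
+ LIBBENCHMARK_GNUPLOT_OPTIONS_INIT( my_gpo );
+ LIBBENCHMARK_GNUPLOT_OPTIONS_SET_Y_AXIS_SCALE_TYPE_LOGARITHMIC( my_gpo );
+ LIBBENCHMARK_GNUPLOT_OPTIONS_SET_WIDTH_IN_PIXELS( my_gpo, 1024 );
+ LIBBENCHMARK_GNUPLOT_OPTIONS_SET_HEIGHT_IN_PIXELS( my_gpo, 768 );
+*/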
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_MISC_VERSION_STRING "7.1.0"
+#define LIBBENCHMARK_MISC_VERSION_INTEGER 710
+
+/***** enums *****/
+enum libbenchmark_misc_query
+{
+ LIBBENCHMARK_MISC_QUERY_GET_BUILD_AND_VERSION_STRING
+};
+
+/***** externs *****/
+extern char const
+ * const libbenchmark_globals_datastructure_names[],
+ * const libbenchmark_globals_benchmark_names[],
+ * const libbenchmark_globals_lock_names[],
+ * const libbenchmark_globals_numa_mode_names[];
+
+extern lfds710_pal_uint_t
+ libbenchmark_globals_benchmark_duration_in_seconds;
+
+/***** public prototypes *****/
+void libbenchmark_misc_pal_helper_new_topology_node( struct libbenchmark_topology_node_state **tns,
+ struct libshared_memory_state *ms );
+
+void libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( struct libbenchmark_topology_node_state *tns,
+ struct libshared_memory_state *ms,
+ lfds710_pal_uint_t logical_processor_number,
+ enum flag windows_processor_group_inuse_flag,
+ lfds710_pal_uint_t windows_processor_group_number );
+
+void libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns );
+
+void libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t numa_node_id );
+
+void libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns );
+
+void libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns );
+
+void libbenchmark_misc_pal_helper_add_cache_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t level,
+ enum libbenchmark_topology_node_cache_type type );
+
+void libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms,
+ lfds710_pal_uint_t logical_processor_number,
+ enum flag windows_processor_group_inuse_flag,
+ lfds710_pal_uint_t windows_processor_group_number );
+
+void libbenchmark_misc_query( enum libbenchmark_misc_query query_type, void *query_input, void *query_output );
+
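+/* a minimal sketch of how a porting-layer populate_topology() implementation might
+ call these helpers; the exact sequence and the LOWERED flag value are assumptions
+ based only on the prototypes above, and my_ts / my_ms stand for the topology and
+ memory states passed in by the caller
+
+ struct libbenchmark_topology_node_state
+ *my_tns;
+
+ libbenchmark_misc_pal_helper_new_topology_node( &my_tns, my_ms );
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( my_tns, my_ms, 0, LOWERED, 0 );
+ libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( my_ts, my_tns );
+*/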
--- /dev/null
+/***** defines *****/
+#if( !defined NULL )
+ #define NULL ( (void *) 0 )
+#endif
+
+#if( defined __GNUC__ )
+ // TRD : makes checking GCC versions much tidier
+ #define LIBLFDS_GCC_VERSION ( __GNUC__ * 100 + __GNUC_MINOR__ * 10 + __GNUC_PATCHLEVEL__ )
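+ // e.g. GCC 4.7.3 gives (4 * 100) + (7 * 10) + 3 = 473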
+#endif
+
+/***** structs *****/
+
+/***** public prototypes *****/
+int libbenchmark_porting_abstraction_layer_populate_topology( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms );
+void libbenchmark_porting_abstraction_layer_topology_node_cleanup( struct libbenchmark_topology_node_state *tns );
+
+void libbenchmark_pal_print_string( char const * const string );
+
--- /dev/null
+/****************************************************************************/
+#if( defined __GNUC__ && LIBLFDS_GCC_VERSION >= 473 )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC 1
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_UNINITIALIZED 0
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_AVAILABLE 1
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_UNAVAILABLE 2
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( pal_lock_gcc_spinlock_atomic_state ) \
+ { \
+ (pal_lock_gcc_spinlock_atomic_state) = LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_AVAILABLE; \
+ LFDS710_MISC_BARRIER_STORE; \
+ lfds710_misc_force_store(); \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( pal_lock_gcc_spinlock_atomic_state ) \
+ { \
+ (pal_lock_gcc_spinlock_atomic_state) = LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_UNAVAILABLE; \
+ LFDS710_MISC_BARRIER_STORE; \
+ lfds710_misc_force_store(); \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( pal_lock_gcc_spinlock_atomic_state ) \
+ { \
+ lfds710_pal_uint_t expected = (lfds710_pal_uint_t) LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_AVAILABLE; \
+ while( 0 == __atomic_compare_exchange_n(&(pal_lock_gcc_spinlock_atomic_state), &expected, (lfds710_pal_uint_t) LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_UNAVAILABLE, 1, __ATOMIC_RELAXED, __ATOMIC_RELAXED) ) expected = (lfds710_pal_uint_t) LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_AVAILABLE; \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( pal_lock_gcc_spinlock_atomic_state ) __atomic_exchange_n( &(pal_lock_gcc_spinlock_atomic_state), (lfds710_pal_uint_t) LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_AVAILABLE, __ATOMIC_RELAXED )
+
+ /***** typedefs *****/
+ typedef lfds710_pal_uint_t pal_lock_gcc_spinlock_atomic_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC 0
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( pal_lock_gcc_spinlock_atomic_state )
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( pal_lock_gcc_spinlock_atomic_state )
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( pal_lock_gcc_spinlock_atomic_state )
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( pal_lock_gcc_spinlock_atomic_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_gcc_spinlock_atomic_state;
+
+#endif
+
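+/* a minimal usage sketch, assuming the GCC >= 4.7.3 branch above; the macros form
+ a simple test-and-set spinlock - GET spins in __atomic_compare_exchange_n until it
+ swaps the state from AVAILABLE to UNAVAILABLE, RELEASE stores AVAILABLE back;
+ my_lock and the critical-section body are hypothetical
+
+ pal_lock_gcc_spinlock_atomic_state
+ my_lock;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( my_lock );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( my_lock );
+ // ...critical section...
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( my_lock );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( my_lock );
+*/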
--- /dev/null
+/****************************************************************************/
+#if( defined __GNUC__ && LIBLFDS_GCC_VERSION >= 412 )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC 1
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_UNINITIALIZED 0
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_AVAILABLE 1
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_UNAVAILABLE 2
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_CREATE( pal_lock_gcc_spinlock_sync_state ) \
+ { \
+ (pal_lock_gcc_spinlock_sync_state) = LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_AVAILABLE; \
+ LFDS710_MISC_BARRIER_STORE; \
+ lfds710_misc_force_store(); \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_DESTROY( pal_lock_gcc_spinlock_sync_state ) \
+ { \
+ (pal_lock_gcc_spinlock_sync_state) = LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_UNAVAILABLE; \
+ LFDS710_MISC_BARRIER_STORE; \
+ lfds710_misc_force_store(); \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( pal_lock_gcc_spinlock_sync_state ) while( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_AVAILABLE != __sync_val_compare_and_swap(&(pal_lock_gcc_spinlock_sync_state), (lfds710_pal_uint_t) LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_AVAILABLE, (lfds710_pal_uint_t) LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_UNAVAILABLE) )
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( pal_lock_gcc_spinlock_sync_state ) __sync_lock_test_and_set( &(pal_lock_gcc_spinlock_sync_state), LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_AVAILABLE )
+
+ /***** typedefs *****/
+ typedef lfds710_pal_uint_t pal_lock_gcc_spinlock_sync_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC 0
+
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_CREATE( pal_lock_gcc_spinlock_sync_state )
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_DESTROY( pal_lock_gcc_spinlock_sync_state )
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( pal_lock_gcc_spinlock_sync_state )
+ #define LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( pal_lock_gcc_spinlock_sync_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_gcc_spinlock_sync_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _MSC_VER && _MSC_VER >= 1400 )
+
+ /* TRD : MSVC
+
+ _MSC_VER indicates Microsoft C compiler
+ - _InterlockedCompareExchangePointer requires 8.0 (1400)
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK 1
+
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_UNINITIALIZED 0
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_AVAILABLE 1
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_UNAVAILABLE 2
+
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_CREATE( pal_lock_msvc_spinlock_state ) \
+ { \
+ pal_lock_msvc_spinlock_state = LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_AVAILABLE; \
+ LFDS710_MISC_BARRIER_STORE; \
+ lfds710_misc_force_store(); \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_DESTROY( pal_lock_msvc_spinlock_state ) \
+ { \
+ pal_lock_msvc_spinlock_state = LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_UNAVAILABLE; \
+ LFDS710_MISC_BARRIER_STORE; \
+ lfds710_misc_force_store(); \
+ }
+
+ /* TRD : bloody MS - they have multiple functions for the same thing
+ I have to use my own abstraction layer *just to get my code working on MS platforms!*
+ */
+
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( pal_lock_msvc_spinlock_state ) \
+ { \
+ char unsigned \
+ result; \
+ \
+ lfds710_pal_uint_t \
+ compare; \
+ \
+ do \
+ { \
+ compare = LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_AVAILABLE; \
+ LFDS710_PAL_ATOMIC_CAS( &(pal_lock_msvc_spinlock_state), &compare, LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_UNAVAILABLE, LFDS710_MISC_CAS_STRENGTH_STRONG, result ); \
+ } \
+ while( result == 0 ); \
+ }
+
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( pal_lock_msvc_spinlock_state ) \
+ { \
+ LFDS710_PAL_ATOMIC_SET( &(pal_lock_msvc_spinlock_state), LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_AVAILABLE ); \
+ }
+
+ /***** typedefs *****/
+ typedef lfds710_pal_uint_t pal_lock_msvc_spinlock_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK 0
+
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_CREATE( pal_lock_msvc_spinlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_DESTROY( pal_lock_msvc_spinlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( pal_lock_msvc_spinlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( pal_lock_msvc_spinlock_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_msvc_spinlock_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _POSIX_THREADS && _POSIX_THREADS >= 0 && !defined KERNEL_MODE )
+
+ /* TRD : POSIX threads
+
+ _POSIX_THREADS indicates POSIX threads
+ - pthread_mutex_init requires POSIX
+ - pthread_mutex_destroy requires POSIX
+ - pthread_mutex_lock requires POSIX
+ - pthread_mutex_unlock requires POSIX
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX 1
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_CREATE( pal_lock_pthread_mutex_state ) pthread_mutex_init( &pal_lock_pthread_mutex_state, NULL )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_DESTROY( pal_lock_pthread_mutex_state ) pthread_mutex_destroy( &pal_lock_pthread_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( pal_lock_pthread_mutex_state ) pthread_mutex_lock( &pal_lock_pthread_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( pal_lock_pthread_mutex_state ) pthread_mutex_unlock( &pal_lock_pthread_mutex_state )
+
+ /***** typedefs *****/
+ typedef pthread_mutex_t pal_lock_pthread_mutex_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX 0
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_CREATE( pal_lock_pthread_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_DESTROY( pal_lock_pthread_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( pal_lock_pthread_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( pal_lock_pthread_mutex_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_pthread_mutex_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _POSIX_THREADS && _POSIX_THREADS >= 0 && !defined KERNEL_MODE )
+
+ /* TRD : POSIX threads
+
+ _POSIX_THREADS indicates POSIX threads
+ - pthread_rwlock_init requires POSIX
+ - pthread_rwlock_destroy requires POSIX
+ - pthread_rwlock_rdlock and pthread_rwlock_wrlock require POSIX
+ - pthread_rwlock_unlock requires POSIX
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK 1
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_CREATE( pal_lock_pthread_rwlock_state ) pthread_rwlock_init( &pal_lock_pthread_rwlock_state, NULL )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_DESTROY( pal_lock_pthread_rwlock_state ) pthread_rwlock_destroy( &pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_READ( pal_lock_pthread_rwlock_state ) pthread_rwlock_rdlock( &pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( pal_lock_pthread_rwlock_state ) pthread_rwlock_wrlock( &pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( pal_lock_pthread_rwlock_state ) pthread_rwlock_unlock( &pal_lock_pthread_rwlock_state )
+
+ /***** typedefs *****/
+ typedef pthread_rwlock_t pal_lock_pthread_rwlock_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK 0
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_CREATE( pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_DESTROY( pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_READ( pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( pal_lock_pthread_rwlock_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( pal_lock_pthread_rwlock_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_pthread_rwlock_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _POSIX_SPIN_LOCKS && _POSIX_SPIN_LOCKS >= 0 && !defined KERNEL_MODE )
+
+ /* TRD : POSIX spin locks
+
+ _POSIX_SPIN_LOCKS indicates POSIX spin locks
+ - pthread_spin_init requires POSIX
+ - pthread_spin_destroy requires POSIX
+ - pthread_spin_lock requires POSIX
+ - pthread_spin_unlock requires POSIX
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE 1
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_CREATE( pal_lock_pthread_spinlock_process_private_state ) pthread_spin_init( &pal_lock_pthread_spinlock_process_private_state, PTHREAD_PROCESS_PRIVATE )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_DESTROY( pal_lock_pthread_spinlock_process_private_state ) pthread_spin_destroy( &pal_lock_pthread_spinlock_process_private_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( pal_lock_pthread_spinlock_process_private_state ) pthread_spin_lock( &pal_lock_pthread_spinlock_process_private_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( pal_lock_pthread_spinlock_process_private_state ) pthread_spin_unlock( &pal_lock_pthread_spinlock_process_private_state )
+
+ /***** typedefs *****/
+ typedef pthread_spinlock_t pal_lock_pthread_spinlock_process_private_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE 0
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_CREATE( pal_lock_pthread_spinlock_process_private_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_DESTROY( pal_lock_pthread_spinlock_process_private_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( pal_lock_pthread_spinlock_process_private_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( pal_lock_pthread_spinlock_process_private_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_pthread_spinlock_process_private_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _POSIX_SPIN_LOCKS && _POSIX_SPIN_LOCKS >= 0 && !defined KERNEL_MODE )
+
+ /* TRD : POSIX spin locks
+
+ _POSIX_SPIN_LOCKS indicates POSIX spin locks
+ - pthread_spin_init requires POSIX
+ - pthread_spin_destroy requires POSIX
+ - pthread_spin_lock requires POSIX
+ - pthread_spin_unlock requires POSIX
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED 1
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_CREATE( pal_lock_pthread_spinlock_process_shared_state ) pthread_spin_init( &pal_lock_pthread_spinlock_process_shared_state, PTHREAD_PROCESS_SHARED )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_DESTROY( pal_lock_pthread_spinlock_process_shared_state ) pthread_spin_destroy( &pal_lock_pthread_spinlock_process_shared_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( pal_lock_pthread_spinlock_process_shared_state ) pthread_spin_lock( &pal_lock_pthread_spinlock_process_shared_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( pal_lock_pthread_spinlock_process_shared_state ) pthread_spin_unlock( &pal_lock_pthread_spinlock_process_shared_state )
+
+ /***** typedefs *****/
+ typedef pthread_spinlock_t pal_lock_pthread_spinlock_process_shared_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED 0
+
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_CREATE( pal_lock_pthread_spinlock_process_shared_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_DESTROY( pal_lock_pthread_spinlock_process_shared_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( pal_lock_pthread_spinlock_process_shared_state )
+ #define LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( pal_lock_pthread_spinlock_process_shared_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_pthread_spinlock_process_shared_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && !defined KERNEL_MODE )
+
+ /* TRD : Windows XP or better
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ NTDDI_VERSION indicates Windows version
+ - InitializeCriticalSection requires XP
+ - DeleteCriticalSection requires XP
+ - EnterCriticalSection requires XP
+ - LeaveCriticalSection requires XP
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION 1
+
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_CREATE( pal_lock_windows_critical_section_state ) InitializeCriticalSection( &pal_lock_windows_critical_section_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_DESTROY( pal_lock_windows_critical_section_state ) DeleteCriticalSection( &pal_lock_windows_critical_section_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( pal_lock_windows_critical_section_state ) EnterCriticalSection( &pal_lock_windows_critical_section_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( pal_lock_windows_critical_section_state ) LeaveCriticalSection( &pal_lock_windows_critical_section_state )
+
+ /***** typedefs *****/
+ typedef CRITICAL_SECTION pal_lock_windows_critical_section_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION 0
+
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_CREATE( pal_lock_windows_critical_section_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_DESTROY( pal_lock_windows_critical_section_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( pal_lock_windows_critical_section_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( pal_lock_windows_critical_section_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_windows_critical_section_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && !defined KERNEL_MODE )
+
+ /* TRD : Windows XP or better
+
+ _WIN32 indicates 64-bit or 32-bit Windows
+ NTDDI_VERSION indicates Windows version
+ - CreateMutex requires XP
+ - CloseHandle requires XP
+ - WaitForSingleObject requires XP
+ - ReleaseMutex requires XP
+ */
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX 1
+
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_CREATE( pal_lock_windows_mutex_state ) pal_lock_windows_mutex_state = CreateMutex( NULL, FALSE, NULL )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_DESTROY( pal_lock_windows_mutex_state ) CloseHandle( pal_lock_windows_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( pal_lock_windows_mutex_state ) WaitForSingleObject( pal_lock_windows_mutex_state, INFINITE )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( pal_lock_windows_mutex_state ) ReleaseMutex( pal_lock_windows_mutex_state )
+
+ /***** typedefs *****/
+ typedef HANDLE pal_lock_windows_mutex_state;
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX )
+
+ /***** defines *****/
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX 0
+
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_CREATE( pal_lock_windows_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_DESTROY( pal_lock_windows_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( pal_lock_windows_mutex_state )
+ #define LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( pal_lock_windows_mutex_state )
+
+ /***** typedefs *****/
+ typedef void * pal_lock_windows_mutex_state;
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP )
+
+ #ifdef LIBBENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "libbenchmark_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBBENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define LIBBENCHMARK_PAL_OS_STRING "Windows"
+
+ #include <windows.h>
+
+ #define LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( pointer_to_time_units_per_second ) QueryPerformanceFrequency( (LARGE_INTEGER *)(pointer_to_time_units_per_second) )
+
+ #define LIBBENCHMARK_PAL_GET_HIGHRES_TIME( pointer_to_time ) QueryPerformanceCounter( (LARGE_INTEGER *)(pointer_to_time) );
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP )
+
+ #ifdef LIBBENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "libbenchmark_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBBENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define LIBBENCHMARK_PAL_OS_STRING "Windows"
+
+ #include <wdm.h>
+
+ #define LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( pointer_to_time_units_per_second ) KeQueryPerformanceCounter( (LARGE_INTEGER *)(pointer_to_time_units_per_second) )
+
+ #define LIBBENCHMARK_PAL_GET_HIGHRES_TIME( pointer_to_time ) *( (LARGE_INTEGER *) pointer_to_time ) = KeQueryPerformanceCounter( NULL );
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE )
+
+ #ifdef LIBBENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "libbenchmark_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBBENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define LIBBENCHMARK_PAL_OS_STRING "Linux"
+
+ #define _GNU_SOURCE
+
+ #include <unistd.h>
+ #include <stdarg.h>
+ #include <stdio.h>
+
+ #if( defined _POSIX_THREADS && _POSIX_TIMERS >= 0 && _POSIX_MONOTONIC_CLOCK >= 0 )
+ #define LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( pointer_to_time_units_per_second ) *(pointer_to_time_units_per_second) = NUMBER_OF_NANOSECONDS_IN_ONE_SECOND
+
+ #define LIBBENCHMARK_PAL_GET_HIGHRES_TIME( pointer_to_time ) \
+ { \
+ struct timespec tp; \
+ clock_gettime( CLOCK_MONOTONIC_RAW, &tp ); \
+ *(pointer_to_time) = tp.tv_sec * NUMBER_OF_NANOSECONDS_IN_ONE_SECOND + tp.tv_nsec; \
+ }
+ #else
+ #error Linux without high resolution timers.
+ #endif
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBBENCHMARK_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches the current platform in "libbenchmark_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBBENCHMARK_PAL_OPERATING_SYSTEM
+
+ #define LIBBENCHMARK_PAL_OS_STRING "Linux"
+
+ #define _GNU_SOURCE
+
+ #include <linux/module.h>
+
+ /* TRD : it's not clear to me how to obtain the high resolution frequency and current count in the Linux kernel;
+          it doesn't matter right now, because it became clear earlier that there's no wait-for-thread-to-complete
+          function either, which breaks the lfds thread abstraction, and until I sort that out, benchmark
+ can't run anyway
+ */
+
+ #error No high resolution time abstraction for the Linux kernel.
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_OPERATING_SYSTEM )
+
+ #error No matching porting abstraction layer in "libbenchmark_porting_abstraction_layer_operating_system.h".
+
+#endif
+
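+ /* Illustrative usage sketch (not part of the library) : measuring an interval
+    with the two timing macros above, mirroring their use in the benchmark
+    thread functions; variable names are illustrative only
+
+      int long long unsigned
+        start_time,
+        end_time,
+        time_units_per_second;
+
+      LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+      LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &start_time );
+      // work under measurement goes here
+      LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &end_time );
+
+      // elapsed seconds = (end_time - start_time) / time_units_per_second
+ */
+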
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_PRNG_MAX ( (lfds710_pal_uint_t) -1 )
+
+ // TRD : 32-bit SplitMix, derived from Sebastiano Vigna's site, CC0 license, http://xorshift.di.unimi.it/splitmix64.c, and email with Dr. Vigna
+ #if( LFDS710_PAL_ALIGN_SINGLE_POINTER == 4 ) // TRD : any 32-bit platform
+ // TRD : struct libbenchmark_prng_state prng_state, lfds710_pal_uint_t seed
+ #define LIBBENCHMARK_PRNG_INIT( prng_state, seed ) (prng_state).entropy = (seed), (prng_state).entropy = ((prng_state).entropy ^ ((prng_state).entropy >> 16)) * 0x85ebca6bUL, (prng_state).entropy = ((prng_state).entropy ^ ((prng_state).entropy >> 13)) * 0xc2b2ae35UL, (prng_state).entropy = ((prng_state).entropy ^ ((prng_state).entropy >> 16))
+
+ // TRD : struct libbenchmark_prng_state prng_state, lfds710_pal_uint_t random_value
+ #define LIBBENCHMARK_PRNG_GENERATE( prng_state, random_value ) \
+ { \
+ (random_value) = ( (prng_state).entropy += 0x9E3779B9UL ); \
+ (random_value) = ((random_value) ^ ((random_value) >> 16)) * 0x85ebca6bUL; \
+ (random_value) = ((random_value) ^ ((random_value) >> 13)) * 0xc2b2ae35UL; \
+ (random_value) = (random_value ^ (random_value >> 16)); \
+ }
+
+ #define LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( random_value ) \
+ { \
+ (random_value) ^= ((random_value) >> 16); \
+ (random_value) *= 0x85ebca6b; \
+ (random_value) ^= ((random_value) >> 13); \
+ (random_value) *= 0xc2b2ae35; \
+ (random_value) ^= ((random_value) >> 16); \
+ }
+#endif
+
+ // TRD : 64-bit SplitMix, from Sebastiano Vigna's site, CC0 license, http://xorshift.di.unimi.it/splitmix64.c
+ #if( LFDS710_PAL_ALIGN_SINGLE_POINTER == 8 ) // TRD : any 64-bit platform
+ // TRD : struct libbenchmark_prng_state prng_state, lfds710_pal_uint_t seed
+ #define LIBBENCHMARK_PRNG_INIT( prng_state, seed ) (prng_state).entropy = (seed), (prng_state).entropy = ((prng_state).entropy ^ ((prng_state).entropy >> 33)) * 0xff51afd7ed558ccdULL, (prng_state).entropy = ((prng_state).entropy ^ ((prng_state).entropy >> 33)) * 0xc4ceb9fe1a85ec53ULL, (prng_state).entropy = ((prng_state).entropy ^ ((prng_state).entropy >> 33))
+
+ // TRD : struct libbenchmark_prng_state prng_state, lfds710_pal_uint_t random_value
+ #define LIBBENCHMARK_PRNG_GENERATE( prng_state, random_value ) \
+ { \
+ (random_value) = ( (prng_state).entropy += 0x9E3779B97F4A7C15ULL ); \
+ (random_value) = ((random_value) ^ ((random_value) >> 30)) * 0xBF58476D1CE4E5B9ULL; \
+ (random_value) = ((random_value) ^ ((random_value) >> 27)) * 0x94D049BB133111EBULL; \
+ (random_value) = (random_value ^ (random_value >> 31)); \
+ }
+
+ #define LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( random_value ) \
+ { \
+ (random_value) ^= (random_value) >> 33; \
+ (random_value) *= 0xff51afd7ed558ccdULL; \
+ (random_value) ^= (random_value) >> 33; \
+ (random_value) *= 0xc4ceb9fe1a85ec53ULL; \
+ (random_value) ^= (random_value) >> 33; \
+ }
+#endif
+
+/***** enums *****/
+
+/***** structs *****/
+struct libbenchmark_prng_state
+{
+ lfds710_pal_uint_t
+ entropy;
+};
+
+/***** externs *****/
+
+/***** public prototypes *****/
+
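+ /* Illustrative usage sketch (not part of the library) : seeding and drawing
+    from the SplitMix PRNG defined above; LFDS710_PRNG_SEED is the seed used
+    elsewhere in the benchmark code, the variable names are illustrative only
+
+      lfds710_pal_uint_t
+        random_value;
+
+      struct libbenchmark_prng_state
+        ps;
+
+      LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+      LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+
+      // random_value now holds the next value in the SplitMix sequence
+ */
+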
--- /dev/null
+/***** defines *****/
+
+/***** enums *****/
+
+/***** structs *****/
+struct libbenchmark_result
+{
+ enum libbenchmark_benchmark_id
+ benchmark_id;
+
+ enum libbenchmark_datastructure_id
+ datastructure_id;
+
+ enum libbenchmark_lock_id
+ lock_id;
+
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ result;
+
+ struct lfds710_btree_au_element
+ baue;
+
+ struct lfds710_list_aso_state
+ *lpset;
+
+ struct libbenchmark_topology_node_state
+ tns;
+};
+
+struct libbenchmark_results_state
+{
+ struct libshared_memory_state
+ *ms;
+
+ struct lfds710_btree_au_state
+ results_tree;
+};
+
+/***** public prototypes *****/
+void libbenchmark_results_init( struct libbenchmark_results_state *rs,
+ struct libshared_memory_state *ms );
+void libbenchmark_results_cleanup( struct libbenchmark_results_state *rs );
+
+void libbenchmark_results_put_result( struct libbenchmark_results_state *rs,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ enum libbenchmark_lock_id lock_id,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct lfds710_list_aso_state *lpset,
+ lfds710_pal_uint_t logical_processor_number,
+ lfds710_pal_uint_t windows_logical_processor_group_number,
+ lfds710_pal_uint_t result );
+
+int libbenchmark_results_get_result( struct libbenchmark_results_state *rs,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ enum libbenchmark_lock_id lock_id,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct lfds710_list_aso_state *lpset,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t *result );
+
+int libbenchmark_result_compare_function( void const *new_key, void const *existing_key );
+
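+ /* Illustrative usage sketch (not part of the library) : the results API as
+    driven by the benchmark cleanup functions; ms, lpset, tns, the id and numa
+    mode variables and the counts are assumed to exist already
+
+      lfds710_pal_uint_t
+        operation_count;
+
+      struct libbenchmark_results_state
+        rs;
+
+      libbenchmark_results_init( &rs, ms );
+
+      // one put per logical processor, typically from a benchmark cleanup function
+      libbenchmark_results_put_result( &rs, datastructure_id, benchmark_id, lock_id, numa_mode,
+                                       lpset, logical_processor_number, windows_logical_processor_group_number,
+                                       operation_count );
+
+      // read back per topology node, e.g. when printing results
+      libbenchmark_results_get_result( &rs, datastructure_id, benchmark_id, lock_id, numa_mode,
+                                       lpset, tns, &operation_count );
+
+      libbenchmark_results_cleanup( &rs );
+ */
+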
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( libbenchmark_threadset_per_thread_state ) (libbenchmark_threadset_per_thread_state).users_per_thread_state
+#define LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( libbenchmark_threadset_per_thread_state ) (libbenchmark_threadset_per_thread_state).numa_node_state->users_per_numa_state
+#define LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( libbenchmark_threadset_per_thread_state ) (libbenchmark_threadset_per_thread_state).threadset_state->users_threadset_state
+
+/***** structs *****/
+struct libbenchmark_threadset_per_thread_state
+{
+ enum flag volatile
+ thread_ready_flag,
+ *threadset_start_flag;
+
+ libshared_pal_thread_handle_t
+ thread_handle;
+
+ struct lfds710_list_asu_element
+ lasue;
+
+ struct libbenchmark_topology_node_state
+ *tns_lp;
+
+ struct libbenchmark_threadset_per_numa_state
+ *numa_node_state;
+
+ struct libbenchmark_threadset_state
+ *threadset_state;
+
+ struct libshared_pal_thread_info
+ pti;
+
+ void
+ *users_per_thread_state;
+};
+
+struct libbenchmark_threadset_per_numa_state
+{
+ lfds710_pal_uint_t
+ numa_node_id;
+
+ struct lfds710_list_asu_element
+ lasue;
+
+ void
+ *users_per_numa_state;
+};
+
+struct libbenchmark_threadset_state
+{
+ enum flag volatile
+ threadset_start_flag;
+
+ libshared_pal_thread_return_t
+ (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_user_state );
+
+ struct lfds710_list_asu_state
+ list_of_per_numa_states,
+ list_of_per_thread_states;
+
+ void
+ *users_threadset_state;
+};
+
+/***** prototypes *****/
+void libbenchmark_threadset_init( struct libbenchmark_threadset_state *tsets,
+ struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ libshared_pal_thread_return_t (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_user_state ),
+ void *users_threadset_state );
+void libbenchmark_threadset_cleanup( struct libbenchmark_threadset_state *ts );
+
+void libbenchmark_threadset_run( struct libbenchmark_threadset_state *tsets );
+
+void libbenchmark_threadset_thread_ready_and_wait( struct libbenchmark_threadset_per_thread_state *ts );
+
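+ /* Illustrative thread-function skeleton (not part of the library) : a function
+    of this shape is passed to libbenchmark_threadset_init and started by
+    libbenchmark_threadset_run; the function name is illustrative only, the state
+    retrieval and return pattern follows the benchmark thread functions
+
+      libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION example_thread_function( void *libbenchmark_threadset_per_thread_state )
+      {
+        struct libbenchmark_threadset_per_thread_state
+          *pts;
+
+        void
+          *users_state;
+
+        pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+        users_state = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+        // signal readiness, then block until every thread in the set is released
+        libbenchmark_threadset_thread_ready_and_wait( pts );
+
+        // benchmark work goes here
+
+        return LIBSHARED_PAL_THREAD_RETURN_CAST( RETURN_SUCCESS );
+      }
+ */
+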
--- /dev/null
+/***** enums *****/
+enum libbenchmark_topology_string_format
+{
+ LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_STDOUT,
+ LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_GNUPLOT
+};
+
+enum libbenchmark_topology_numa_mode
+{
+ LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP,
+ LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA,
+ LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED
+};
+
+enum libbenchmark_topology_query
+{
+ LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE,
+ LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR
+};
+
+/***** structs *****/
+struct libbenchmark_topology_state
+{
+ int
+ line_width;
+
+ struct lfds710_btree_au_state
+ lp_printing_offset_lookup_tree,
+ topology_tree;
+};
+
+struct libbenchmark_topology_logical_processor_set
+{
+ struct lfds710_list_aso_state
+ logical_processors;
+
+ struct lfds710_list_asu_element
+ lasue;
+};
+
+struct libbenchmark_topology_iterate_state
+{
+ enum libbenchmark_topology_node_type
+ type;
+
+ struct lfds710_btree_au_element
+ *baue;
+};
+
+struct libbenchmark_topology_numa_node
+{
+ enum libbenchmark_topology_numa_mode
+ mode;
+
+ struct lfds710_list_asu_element
+ lasue;
+};
+
+/***** public prototypes *****/
+int libbenchmark_topology_init( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms );
+void libbenchmark_topology_cleanup( struct libbenchmark_topology_state *ts );
+
+void libbenchmark_topology_insert( struct libbenchmark_topology_state *ts, struct libbenchmark_topology_node_state *tns );
+
+int libbenchmark_topology_compare_logical_processor_function( void const *new_key, void const *existing_key );
+
+void libbenchmark_topology_generate_deduplicated_logical_processor_sets( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets );
+
+void libbenchmark_topology_generate_numa_modes_list( struct libbenchmark_topology_state *ts, enum libbenchmark_topology_numa_mode numa_mode, struct libshared_memory_state *ms, struct lfds710_list_asu_state *numa_modes_list );
+
+char *libbenchmark_topology_generate_string( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, enum libbenchmark_topology_string_format format );
+char *libbenchmark_topology_generate_lpset_string( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_aso_state *lpset );
+
+int libbenchmark_topology_compare_lp_printing_offsets_function( void const *new_key, void const *existing_key );
+int libbenchmark_topology_compare_node_against_lp_printing_offset_function( void const *new_key, void const *existing_key );
+
+void libbenchmark_topology_iterate_init( struct libbenchmark_topology_iterate_state *tis, enum libbenchmark_topology_node_type type );
+int libbenchmark_topology_iterate( struct libbenchmark_topology_state *ts, struct libbenchmark_topology_iterate_state *tis, struct libbenchmark_topology_node_state **tns );
+
+void libbenchmark_topology_query( struct libbenchmark_topology_state *ts, enum libbenchmark_topology_query query_type, void *query_input, void *query_output );
+
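+ /* Illustrative usage sketch (not part of the library) : walking every logical
+    processor node in an already initialised topology state ts; the iterate
+    return value is assumed to be non-zero while nodes remain
+
+      struct libbenchmark_topology_iterate_state
+        tis;
+
+      struct libbenchmark_topology_node_state
+        *tns;
+
+      libbenchmark_topology_iterate_init( &tis, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR );
+
+      while( libbenchmark_topology_iterate(&ts, &tis, &tns) )
+      {
+        // tns now points at the next logical processor node
+      }
+ */
+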
--- /dev/null
+/***** defines *****/
+#define LIBBENCHMARK_TOPOLOGY_NODE_GET_TYPE( tns, node_type ) (tns).type
+#define LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( tns, node_type ) (tns).type = (node_type)
+
+#define LIBBENCHMARK_TOPOLOGY_NODE_GET_CACHE_TYPE( tns, cache_type ) (tns).extended_node_info.cache.type
+#define LIBBENCHMARK_TOPOLOGY_NODE_SET_CACHE_TYPE( tns, cache_type ) (tns).extended_node_info.cache.type = (cache_type)
+
+#define LIBBENCHMARK_TOPOLOGY_NODE_GET_CACHE_LEVEL( tns, cache_level ) (tns).extended_node_info.cache.level
+#define LIBBENCHMARK_TOPOLOGY_NODE_SET_CACHE_LEVEL( tns, cache_level ) (tns).extended_node_info.cache.level = (cache_level)
+
+#define LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID( tns ) (tns).extended_node_info.numa.id
+#define LIBBENCHMARK_TOPOLOGY_NODE_SET_NUMA_ID( tns, numa_node_id ) (tns).extended_node_info.numa.id = (numa_node_id)
+
+#define LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( tns ) (tns).extended_node_info.logical_processor.number
+#define LIBBENCHMARK_TOPOLOGY_NODE_SET_LOGICAL_PROCESSOR_NUMBER( tns, processor_number ) (tns).extended_node_info.logical_processor.number = (processor_number)
+
+#define LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( tns ) (tns).extended_node_info.logical_processor.windows_group_number
+#define LIBBENCHMARK_TOPOLOGY_NODE_SET_WINDOWS_GROUP_NUMBER( tns, win_group_number ) (tns).extended_node_info.logical_processor.windows_group_number = (win_group_number), (tns).extended_node_info.logical_processor.windows_group_number_set_flag = RAISED
+#define LIBBENCHMARK_TOPOLOGY_NODE_UNSET_WINDOWS_GROUP_NUMBER( tns ) LIBBENCHMARK_TOPOLOGY_NODE_SET_WINDOWS_GROUP_NUMBER( tns, 0 ), (tns).extended_node_info.logical_processor.windows_group_number_set_flag = LOWERED
+#define LIBBENCHMARK_TOPOLOGY_NODE_IS_WINDOWS_GROUP_NUMBER( tns ) (tns).extended_node_info.logical_processor.windows_group_number_set_flag
+
+/***** enums *****/
+enum libbenchmark_topology_node_type
+{
+ LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR,
+ LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE,
+ LIBBENCHMARK_TOPOLOGY_NODE_TYPE_PHYSICAL_PROCESSOR,
+ LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SOCKET,
+ LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA,
+ LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SYSTEM
+};
+
+enum libbenchmark_topology_node_cache_type
+{
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_DATA,
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_INSTRUCTION,
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_UNIFIED,
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_COUNT
+};
+
+enum libbenchmark_topology_logical_processor_set_encoding
+{
+ LIBBENCHMARK_TOPOLOGY_LOGICAL_PROCESSOR_SET_ENCODING_BITMASK,
+ LIBBENCHMARK_TOPOLOGY_LOGICAL_PROCESSOR_SET_ENCODING_PATH_TO_CSV_HEX,
+ LIBBENCHMARK_TOPOLOGY_LOGICAL_PROCESSOR_SET_ENCODING_SINGLE_LOGICAL_PROCESSOR
+};
+
+/***** structs *****/
+struct libbenchmark_topology_node_cache
+{
+ enum libbenchmark_topology_node_cache_type
+ type;
+
+ lfds710_pal_uint_t
+ level;
+};
+
+struct libbenchmark_topology_node_logical_processor
+{
+ enum flag
+ windows_group_number_set_flag;
+
+ lfds710_pal_uint_t
+ number,
+ windows_group_number;
+};
+
+struct libbenchmark_topology_node_numa
+{
+ lfds710_pal_uint_t
+ id;
+};
+
+// TRD : most node types just *are* (a socket is a socket, etc), but caches, NUMA nodes and LPs have some extra info
+union libbenchmark_topology_node_extended_info
+{
+ struct libbenchmark_topology_node_cache
+ cache;
+
+ struct libbenchmark_topology_node_logical_processor
+ logical_processor;
+
+ struct libbenchmark_topology_node_numa
+ numa;
+};
+
+struct libbenchmark_topology_node_state
+{
+ enum libbenchmark_topology_node_type
+ type;
+
+ struct lfds710_btree_au_element
+ baue;
+
+ struct lfds710_list_aso_element
+ lasoe;
+
+ struct lfds710_list_aso_state
+ logical_processor_children;
+
+ union libbenchmark_topology_node_extended_info
+ extended_node_info;
+};
+
+/***** public prototypes *****/
+void libbenchmark_topology_node_init( struct libbenchmark_topology_node_state *tns );
+void libbenchmark_topology_node_cleanup( struct libbenchmark_topology_node_state *tns, void (*element_cleanup_callback)(struct lfds710_list_aso_state *lasos, struct lfds710_list_aso_element *lasoe) );
+
+int libbenchmark_topology_node_compare_nodes_function( void const *new_key, void const *existing_key );
+int libbenchmark_topology_node_compare_node_types_function( void const *new_key, void const *existing_key );
+
+int libbenchmark_topology_node_compare_lpsets_function( struct lfds710_list_aso_state *lpset_one, struct lfds710_list_aso_state *lpset_two );
+ // TRD : only used in the results compare function, where we compare two lists of nodes which are logical processors
+
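+ /* Illustrative usage sketch (not part of the library) : initialising a node
+    and marking it as a logical processor with the macros above; the processor
+    number is illustrative only
+
+      struct libbenchmark_topology_node_state
+        tns;
+
+      libbenchmark_topology_node_init( &tns );
+
+      LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR );
+      LIBBENCHMARK_TOPOLOGY_NODE_SET_LOGICAL_PROCESSOR_NUMBER( tns, 0 );
+      LIBBENCHMARK_TOPOLOGY_NODE_UNSET_WINDOWS_GROUP_NUMBER( tns );
+ */
+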
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkinstance_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libbenchmark_benchmarkinstance_cleanup( struct libbenchmark_benchmarkinstance_state *bs )
+{
+ LFDS710_PAL_ASSERT( bs != NULL );
+
+ // TRD : we do naaauuuutttthhiiiinnnn'
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkinstance_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarkinstance_init( struct libbenchmark_benchmarkinstance_state *bs,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ enum libbenchmark_lock_id lock_id,
+ struct libbenchmark_topology_state *ts,
+ void (*init_function)( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets ),
+ void (*cleanup_function)( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets ) )
+{
+ LFDS710_PAL_ASSERT( bs != NULL );
+ // TRD : datastructure_id can be any value in its range
+ // TRD : benchmark_id can be any value in its range
+ // TRD : lock_id can be any value in its range
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( init_function != NULL );
+ LFDS710_PAL_ASSERT( cleanup_function != NULL );
+
+ bs->datastructure_id = datastructure_id;
+ bs->benchmark_id = benchmark_id;
+ bs->lock_id = lock_id;
+ bs->ts = ts;
+ bs->init_function = init_function;
+ bs->cleanup_function = cleanup_function;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkinstance_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarkinstance_run( struct libbenchmark_benchmarkinstance_state *bs,
+ struct lfds710_list_aso_state *lpset,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libshared_memory_state *ms,
+ struct libbenchmark_results_state *rs )
+{
+ char
+ *lpset_string,
+ temp[64];
+
+ lfds710_pal_uint_t
+ operation_count;
+
+ struct lfds710_list_aso_element
+ *lasoe;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ LFDS710_PAL_ASSERT( bs != NULL );
+ LFDS710_PAL_ASSERT( lpset != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ libshared_memory_set_rollback( ms );
+
+ lpset_string = libbenchmark_topology_generate_lpset_string( bs->ts, ms, lpset );
+
+ libbenchmark_pal_print_string( lpset_string );
+
+ libbenchmark_pal_print_string( " " );
+ libbenchmark_pal_print_string( libbenchmark_globals_datastructure_names[bs->datastructure_id] );
+ libbenchmark_pal_print_string( " " );
+ libbenchmark_pal_print_string( libbenchmark_globals_lock_names[bs->lock_id] );
+
+ libbenchmark_pal_print_string( " (" );
+ libbenchmark_pal_print_string( libbenchmark_globals_numa_mode_names[numa_mode] );
+ libbenchmark_pal_print_string( ")" );
+
+ bs->init_function( bs->ts, lpset, ms, numa_mode, &bs->tsets );
+
+ libbenchmark_threadset_run( &bs->tsets );
+
+ // TRD : cleanup transfers results to the resultset
+ bs->cleanup_function( lpset, numa_mode, rs, &bs->tsets );
+
+ libbenchmark_threadset_cleanup( &bs->tsets );
+
+ // TRD : print the results
+
+ lasoe = NULL;
+
+ while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*lpset, lasoe) )
+ {
+ tns = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ libbenchmark_results_get_result( rs,
+ bs->datastructure_id,
+ bs->benchmark_id,
+ bs->lock_id,
+ numa_mode,
+ lpset,
+ tns,
+ &operation_count );
+
+ libshared_ansi_strcpy( temp, ", " );
+ libshared_ansi_strcat_number( temp, operation_count / libbenchmark_globals_benchmark_duration_in_seconds );
+ libshared_ansi_strcat( temp, " (" );
+
+ // libshared_ansi_strcat_number_with_leading_zeros( temp, LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER(*tns), 3 );
+ libshared_ansi_strcat_number( temp, LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER(*tns) );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_IS_WINDOWS_GROUP_NUMBER(*tns) )
+ {
+ libshared_ansi_strcat( temp, "/" );
+ libshared_ansi_strcat_number( temp, LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER(*tns) );
+ }
+
+ libshared_ansi_strcat( temp, ")" );
+
+ libbenchmark_pal_print_string( temp );
+ }
+
+ libbenchmark_pal_print_string( "\n" );
+
+ libshared_memory_rollback( ms );
+
+ return;
+}
+
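+ /* Illustrative lifecycle sketch (not part of the library) : how a benchmark
+    instance is driven; the ids, ts, lpset, numa_mode, ms, rs and the
+    init/cleanup functions are assumed to exist already
+
+      struct libbenchmark_benchmarkinstance_state
+        bs;
+
+      libbenchmark_benchmarkinstance_init( &bs, datastructure_id, benchmark_id, lock_id, ts,
+                                           init_function, cleanup_function );
+
+      // run prints progress, drives the threadset and places per-thread results into rs
+      libbenchmark_benchmarkinstance_run( &bs, lpset, numa_mode, ms, rs );
+
+      libbenchmark_benchmarkinstance_cleanup( &bs );
+ */
+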
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the element_key_array over into each NUMA node state
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the element_key_array over into each NUMA node state
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+        if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+        {
+          largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+          largest_pns = pns;
+        }
+ }
+
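+      // TRD : place the tree state itself on the NUMA node which has the most LPs in the current set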
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+        /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+                 and then allocate the correct number of elements from that NUMA node (1024 per LP)
+        */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
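+      /* TRD : same bookkeeping as the NUMA case, but the benchmark elements are all
+               deliberately placed on the single largest NUMA node (see the allocation
+               below), rather than on the node local to each LP
+      */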
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+        if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+        {
+          largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+          largest_pns = pns;
+        }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+        /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+                 and then allocate the correct number of elements from that NUMA node (1024 per LP)
+        */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+  LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
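+  /* TRD : benchmark loop - one operation is a read of a random key followed by a write
+           to a random key (the write is currently the same get_by_key, with SET_VALUE
+           left commented out); the clock is only re-read every TIME_LOOP_COUNT
+           iterations, to keep timing overhead out of the measurement
+  */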
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+      LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
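+  // TRD : emit one result per thread - its operation count over the benchmark duration - then tear down the tree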
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+  obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
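+  /* TRD : lookup keys from the benchmark thread are bare lfds710_pal_uint_t pointers, while
+           stored keys point at whole benchmark elements; the casts below work for both
+           because datum is the first member of the benchmark element struct
+  */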
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct lfds700_btree_au_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element
+ *bme;
+
+ struct lfds700_btree_au_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element
+ *bme;
+
+ struct lfds700_btree_au_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum lfds700_btree_au_insert_result
+ ir;
+
+ struct lfds700_misc_prng_state
+ lfds700_ps;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct lfds700_btree_au_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
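+  // TRD : lfds700 functions take a caller-provided PRNG state; initialise it once here and pass it to every insert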
+ lfds700_misc_prng_init( &lfds700_ps );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds700_btree_au_state), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+      lfds700_btree_au_init_valid_on_current_logical_core( bs, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( obs->bme[loop].be, &obs->bme[loop] );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( obs->bme[loop].be, &obs->bme[loop] );
+ ir = lfds700_btree_au_insert( bs, &obs->bme[loop].be, NULL, &lfds700_ps );
+ }
+ while( ir == LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+        if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+        {
+          largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+          largest_pns = pns;
+        }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_btree_au_state), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_btree_au_init_valid_on_current_logical_core( bs, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+        /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+                 and then allocate the correct number of elements from that NUMA node (1024 per LP)
+        */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element) * number_benchmark_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = lfds700_btree_au_insert( bs, &ptns->bme[loop].be, NULL, &lfds700_ps );
+ }
+ while( ir == LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+        if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+        {
+          largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+          largest_pns = pns;
+        }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_btree_au_state), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_btree_au_init_valid_on_current_logical_core( bs, key_compare_function, LFDS700_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+        /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+                 and then allocate the correct number of elements from that NUMA node (1024 per LP)
+        */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element) * number_benchmark_elements, LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LFDS700_BTREE_AU_SET_KEY_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = lfds700_btree_au_insert( bs, &ptns->bme[loop].be, NULL, &lfds700_ps );
+ }
+ while( ir == LFDS700_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct lfds700_btree_au_element
+ *existing_be;
+
+ struct lfds700_btree_au_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+  LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ lfds700_btree_au_get_by_key( bs, &element_key_array[index], &existing_be );
+ // LFDS700_BTREE_AU_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ lfds700_btree_au_get_by_key( bs, &element_key_array[index], &existing_be );
+ // LFDS700_BTREE_AU_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+      LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+  obs = tsets->users_threadset_state;
+
+ lfds700_btree_au_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct lfds710_btree_au_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
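+/* TRD : in the NUMA modes each NUMA node keeps its own full copy of the key array, so the
+         key lookups performed during the benchmark read node-local memory
+*/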
+struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element
+ *bme;
+
+ struct lfds710_btree_au_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element
+ *bme;
+
+ struct lfds710_btree_au_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum lfds710_btree_au_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct lfds710_btree_au_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_btree_au_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+      lfds710_btree_au_init_valid_on_current_logical_core( bs, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( obs->bme[loop].be, &obs->bme[loop] );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( obs->bme[loop].be, &obs->bme[loop] );
+ ir = lfds710_btree_au_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+        if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+        {
+          largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+          largest_pns = pns;
+        }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_btree_au_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_btree_au_init_valid_on_current_logical_core( bs, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+        /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+                 and then allocate the correct number of elements from that NUMA node (1024 per LP)
+        */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = lfds710_btree_au_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+        if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+        {
+          largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+          largest_pns = pns;
+        }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_btree_au_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_btree_au_init_valid_on_current_logical_core( bs, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+        /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+                 and then allocate the correct number of elements from that NUMA node (1024 per LP)
+        */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = lfds710_btree_au_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct lfds710_btree_au_element
+ *existing_be;
+
+ struct lfds710_btree_au_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
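+  /* TRD : load barrier on entry so this thread sees the benchmark state written by the main
+           thread during init; the matching store barrier and forced store at the end of the
+           function publish the operation count
+  */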
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+  LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ lfds710_btree_au_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ lfds710_btree_au_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+      LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+  obs = tsets->users_threadset_state;
+
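+  // TRD : no per-element cleanup callback - the elements live in libshared_memory allocations and are not individually freed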
+ lfds710_btree_au_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_msvc_spinlock_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
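+ // element keys are random; if an insert collides with a key already in the tree,
+ // the do-while regenerates the key and retries until the insert succeeds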
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_msvc_spinlock_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
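+ // in NUMA mode the tree state lives on the NUMA node which has the most logical
+ // processors in the benchmark set, while each node's benchmark elements and key
+ // array copy are allocated node-locally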
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_msvc_spinlock_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_msvc_spinlock_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
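+ // NUMA_BUT_NOT_USED keeps the per-NUMA bookkeeping, but the tree state and every
+ // benchmark element are placed on a single NUMA node, so NUMA locality is
+ // deliberately not exploited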
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_msvc_spinlock_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_msvc_spinlock_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
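+ // publish each thread's operation count into the results store, keyed by data
+ // structure, benchmark, lock type, NUMA mode and logical processor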
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_msvc_spinlock_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_mutex_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_mutex_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_mutex_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_mutex_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
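+ // (each NUMA node then holds a full local copy, so indexing the key array in the
+ // timed loop reads node-local memory)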
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_mutex_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_mutex_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_pthread_mutex_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_rwlock_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_rwlock_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_rwlock_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_rwlock_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy over into each NUMA node state the element_key_array
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
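+ // each thread receives its own PRNG seed, passed through the murmurhash3 mixing
+ // function so that per-thread random sequences differ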
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_rwlock_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_rwlock_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the full element_key_array into each per-NUMA-node state, so threads read the complete key set from node-local memory
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_read( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_write( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
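+ // reading the high-resolution clock is relatively expensive, so it is only sampled every TIME_LOOP_COUNT operations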
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_pthread_rwlock_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_thread, NULL );
+
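+ // 1024 elements per logical processor, so the tree size scales with the number of threads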
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
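+ // duplicate random keys are rejected by the tree, so on a collision re-generate the datum and retry the insert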
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
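+ // find the NUMA node holding the most LPs from the current set; the btree state is placed
+ // on that node, while each node's benchmark elements are allocated locally further below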
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the full element_key_array into each per-NUMA-node state, so threads read the complete key set from node-local memory
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the full element_key_array into each per-NUMA-node state, so threads read the complete key set from node-local memory
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
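+ // a spinlock has no reader/writer distinction, so the read and the write below both go through get_by_key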
+ // TRD : read
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
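+ // as in the other lock variants, the btree state is placed on the NUMA node with the most LPs in the set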
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the full element_key_array into each per-NUMA-node state, so threads read the complete key set from node-local memory
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the full element_key_array into each per-NUMA-node state, so threads read the complete key set from node-local memory
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
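+ // as with the process-private spinlock, both the read and the write use get_by_key under the same lock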
+ // TRD : read
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_thread, NULL );
+
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_windows_critical_section_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
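+ // the do-while regenerates the random datum and retries whenever insert fails on an existing key, so every key in the tree is unique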
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_windows_critical_section_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_windows_critical_section_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_windows_critical_section_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the element_key_array into each per-NUMA-node state
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
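+ // per-thread PRNG seed, mixed (murmurhash3 finaliser) so the per-thread random sequences are well separated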
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_windows_critical_section_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_windows_critical_section_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the element_key_array into each per-NUMA-node state
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
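+ // benchmark loop : each iteration looks up one random key for the read and a second random key for the write (the SET_VALUE step itself is commented out), and counts as one operation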
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
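+ // read the high-resolution clock only once every TIME_LOOP_COUNT iterations, keeping the cost of the time call out of the measured work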
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_windows_critical_section_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_btree_au_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element
+{
+ lfds710_pal_uint_t
+ datum;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ be;
+};
+
+struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count,
+ per_thread_prng_seed;
+};
+
+struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_numa_benchmark_state
+{
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_state
+ *bs;
+};
+
+struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_overall_benchmark_state
+{
+ enum libbenchmark_topology_numa_mode
+ numa_mode;
+
+ lfds710_pal_uint_t
+ *element_key_array,
+ number_element_keys;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element
+ *bme;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_state
+ *bs;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum libbenchmark_datastructure_btree_au_windows_mutex_insert_result
+ ir;
+
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_benchmark_elements,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0,
+ total_number_benchmark_elements;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_numa_benchmark_state
+ *ptns;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_state
+ *bs = NULL;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_thread, NULL );
+
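+ // setup mirrors the critical-section variant above, but the tree is protected by a Windows mutex; again 1024 elements per logical processor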
+ total_number_benchmark_elements = number_logical_processors * 1024;
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBBENCHMARK_PRNG_INIT( ps, LFDS710_PRNG_SEED );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_btree_au_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_windows_mutex_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_FAIL, NULL );
+
+ obs->bme = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element) * total_number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, obs->bme[loop].datum );
+ obs->element_key_array[loop] = obs->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_KEY_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( *bs, obs->bme[loop].be, &obs->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_windows_mutex_insert( bs, &obs->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_windows_mutex_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_windows_mutex_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the element_key_array into each per-NUMA-node state
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ bs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_btree_au_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_btree_au_windows_mutex_init( bs, key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_FAIL, NULL );
+
+ obs->element_key_array = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1024 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ number_benchmark_elements = number_logical_processors_in_numa_node * 1024;
+
+ ptns = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_numa_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ptns->element_key_array = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(lfds710_pal_uint_t) * total_number_benchmark_elements, sizeof(lfds710_pal_uint_t) );
+ ptns->number_element_keys = total_number_benchmark_elements;
+
+ // TRD : everyone stores their elements in the same NUMA node
+ ptns->bme = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element) * number_benchmark_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_benchmark_elements ; loop++, index++ )
+ do
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptns->bme[loop].datum );
+ obs->element_key_array[index] = ptns->bme[loop].datum;
+
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_KEY_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( *bs, ptns->bme[loop].be, &ptns->bme[loop] );
+ ir = libbenchmark_datastructure_btree_au_windows_mutex_insert( bs, &ptns->bme[loop].be, NULL );
+ }
+ while( ir == LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY );
+
+ pns->users_per_numa_state = ptns;
+ }
+
+ // TRD : now copy the element_key_array into each per-NUMA-node state
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptns = pns->users_per_numa_state;
+
+ for( loop = 0 ; loop < total_number_benchmark_elements ; loop++ )
+ ptns->element_key_array[loop] = obs->element_key_array[loop];
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LIBBENCHMARK_PRNG_GENERATE( ps, ptbs->per_thread_prng_seed );
+ LIBBENCHMARK_PRNG_MURMURHASH3_MIXING_FUNCTION( ptbs->per_thread_prng_seed );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->number_element_keys = total_number_benchmark_elements;
+ obs->bs = bs;
+ obs->numa_mode = numa_mode;
+
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ *element_key_array = NULL,
+ index,
+ number_element_keys = 0,
+ operation_count = 0,
+ random_value,
+ time_loop = 0;
+
+ struct libbenchmark_prng_state
+ ps;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *existing_be;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_state
+ *bs;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_numa_benchmark_state
+ *pnbs = NULL;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ if( obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA or obs->numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED )
+ pnbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_NUMA_STATE( *pts );
+
+ bs = obs->bs;
+
+ LIBBENCHMARK_PRNG_INIT( ps, ptbs->per_thread_prng_seed );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ switch( obs->numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ element_key_array = obs->element_key_array;
+ number_element_keys = obs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ element_key_array = pnbs->element_key_array;
+ number_element_keys = pnbs->number_element_keys;
+ break;
+ }
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : read
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_GET_VALUE_FROM_ELEMENT( benchmark_state->bs, *existing_be, datum );
+
+ LIBBENCHMARK_PRNG_GENERATE( ps, random_value );
+ index = random_value % number_element_keys;
+
+ // TRD : write
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_key( bs, NULL, &element_key_array[index], &existing_be );
+ // LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( benchmark_state->bs, *existing_be, benchmark_state->benchmark_element_array[index].datum );
+
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU,
+ LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_btree_au_windows_mutex_cleanup( obs->bs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element
+ *new_benchmark_element,
+ *existing_benchmark_element;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
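+ // note : get_by_key is called with a pointer to a bare lfds710_pal_uint_t key while insert uses a pointer to the whole benchmark element; this relies on datum being the first member of the element struct, so ->datum reads the key in both cases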
+ new_benchmark_element = (struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element *) new_key;
+ existing_benchmark_element = (struct libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_benchmark_element *) existing_key;
+
+ if( new_benchmark_element->datum < existing_benchmark_element->datum )
+ return -1;
+
+ if( new_benchmark_element->datum > existing_benchmark_element->datum )
+ return 1;
+
+ return 0;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_thread, NULL );
+
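+ // the freelist is primed with one element per logical processor in the set; each benchmark thread then repeatedly pops an element and pushes it straight back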
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist from the NUMA node with the most processors from the current set
+ or, if counts are equal, the one with the lowest NUMA node ID
+ iterate over the NUMA node list
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, and give each one the freelist state as its user state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state lives in the NUMA node with the most threads from the current set
+ or, if counts are equal, the one with the lowest NUMA node ID
+ all elements are allocated from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
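+ // benchmark loop : one pop followed by one push counts as one operation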
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_gcc_spinlock_atomic_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_thread, NULL );
+
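+ // same push1/pop1 setup as the gcc_spinlock_atomic variant above, using the gcc_spinlock_sync lock implementation instead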
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist from the NUMA node with the most processors from the current set
+ or, if counts are equal, the one with the lowest NUMA node ID
+ iterate over the NUMA node list
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, and give each one the freelist state as its user state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state in the NUMA node with most threads from the current set
+ or, if equal threads, with lowest NUMA
+ all elements alloced from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
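+ // the freelist state itself serves as the threadset-wide user state; the thread
+ // function below retrieves it via LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE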
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
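+ /* the measured loop; each iteration is one pop/push pair, and the high-resolution
+ clock is read only once every TIME_LOOP_COUNT operations, presumably so that
+ timer overhead stays out of the measured work
+ */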
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_gcc_spinlock_sync_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_overall_benchmark_state
+{
+ struct lfds700_freelist_state
+ *fs;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum flag
+ finished_flag = LOWERED;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ lfds710_pal_uint_t
+ *fe_array_count,
+ index = 0,
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ number_numa_nodes,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct lfds700_freelist_element
+ *fe,
+ **fe_array_pointers;
+
+ struct lfds700_freelist_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds700_misc_prng_init( &ps );
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds700_freelist_state), LFDS700_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_freelist_init_valid_on_current_logical_core( fs, NULL );
+
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds700_freelist_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ lfds700_freelist_push( fs, &fe[loop], &ps );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist from the NUMA node with most processors from the current set
+ or, if equal threads, with lowest NUMA
+ iterate over the NUMA node list
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, and give each one the freelist state as its user state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_freelist_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_freelist_init_valid_on_current_logical_core( fs, NULL );
+
+ /* TRD : now figure out how many elements are needed from each NUMA node
+ allocate them all
+ then push them interleaved, round-robin, onto the freelist
+ */
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA, (void *) &number_numa_nodes );
+
+ fe_array_pointers = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds700_freelist_element *) * number_numa_nodes, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ fe_array_count = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * number_numa_nodes, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_numa_nodes ; loop++ )
+ fe_array_count[loop] = 0;
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe_array_count[index] = number_logical_processors_in_numa_node;
+ fe_array_pointers[index] = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct lfds700_freelist_element) * fe_array_count[index], LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ index++;
+ }
+
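+ /* drain the per-NUMA-node element arrays round-robin - one element from each
+ node per pass - so that elements from different NUMA nodes end up interleaved
+ in the freelist
+ */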
+ while( finished_flag == LOWERED )
+ {
+ for( loop = 0 ; loop < index ; loop++ )
+ if( fe_array_count[loop] > 0 )
+ lfds700_freelist_push( fs, &fe_array_pointers[loop][ --fe_array_count[loop] ], &ps );
+
+ finished_flag = RAISED;
+
+ for( loop = 0 ; loop < index ; loop++ )
+ if( fe_array_count[loop] > 0 )
+ finished_flag = LOWERED;
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state in the NUMA node with most threads from the current set
+ or, if equal threads, with lowest NUMA
+ all elements alloced from that node as well
+
+ SO much easier to figure out allocs than with NUMA OMG
+ all of this code needs rewriting
+ and the NUMA-but-not-used stuff is interesting but I don't think it carries its own weight
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_freelist_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_freelist_init_valid_on_current_logical_core( fs, NULL );
+
+ // TRD : one freelist element per logical processor in the freelist (the lfds700 freelist has no elimination array)
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_freelist_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ lfds700_freelist_push( fs, &fe[loop], &ps );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->fs = fs;
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ struct lfds700_freelist_state
+ *fs;
+
+ struct lfds700_freelist_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ fs = obs->fs;
+
+ lfds700_misc_prng_init( &ps );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ lfds700_freelist_pop( fs, &fe, &ps );
+ lfds700_freelist_push( fs, fe, &ps );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ lfds700_freelist_cleanup( obs->fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+
+ struct lfds710_prng_st_state
+ psts;
+};
+
+struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_overall_benchmark_state
+{
+ struct lfds710_freelist_state
+ *fs;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ enum flag
+ finished_flag = LOWERED;
+
+ lfds710_pal_uint_t
+ ea_size_in_freelist_elements,
+ *fe_array_count,
+ index = 0,
+ loop,
+ number_freelist_elements,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ number_numa_nodes,
+ number_freelist_element_pointers_per_atomic_isolation = LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES / sizeof(struct lfds710_freelist_element *),
+ largest_number_logical_processors_in_numa_node = 0,
+ random_value,
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors = 2,
+ temp_number_logical_processors;
+
+ struct lfds710_freelist_element * volatile
+ (*ea)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct lfds710_freelist_element
+ *fe,
+ **fe_array_pointers;
+
+ struct lfds710_freelist_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ lfds710_prng_st_init( &psts, LFDS710_PRNG_SEED );
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
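+ // the power-of-two value computed above is used below as the elimination array
+ // size argument to lfds710_freelist_init_valid_on_current_logical_core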
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_freelist_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ea = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_freelist_init_valid_on_current_logical_core( fs, ea, smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+
+ // TRD : fill the elimination array and have one element per thread in the freelist proper
+ number_freelist_elements = (smallest_power_of_two_larger_than_or_equal_to_number_logical_processors * number_freelist_element_pointers_per_atomic_isolation) + number_logical_processors;
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_freelist_element) * number_freelist_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_freelist_elements ; loop++ )
+ lfds710_freelist_push( fs, &fe[loop], &psts );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
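+ // seed a per-thread PRNG from the init-time PRNG, so each benchmark thread gets its own sequence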
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &ptbs->psts, random_value );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist from the NUMA node with most processors from the current set
+ or, if equal threads, with lowest NUMA
+ iterate over the NUMA node list
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, and give each one the freelist state as its user state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_freelist_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ea = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_freelist_init_valid_on_current_logical_core( fs, ea, smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+
+ /* TRD : now figure out how many elements are needed from each NUMA node
+ allocate them all
+ then push them interleaved, round-robin, onto the freelist
+ */
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA, (void *) &number_numa_nodes );
+
+ fe_array_pointers = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_freelist_element *) * number_numa_nodes, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ fe_array_count = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(lfds710_pal_uint_t) * number_numa_nodes, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_numa_nodes ; loop++ )
+ fe_array_count[loop] = 0;
+
+ // TRD : now query the freelist for the elimination array size, in freelist elements
+ lfds710_freelist_query( fs, LFDS710_FREELIST_QUERY_GET_ELIMINATION_ARRAY_EXTRA_ELEMENTS_IN_FREELIST_ELEMENTS, NULL, (void *) &ea_size_in_freelist_elements );
+
+ // TRD : we need to divide that number of elements over the NUMA nodes in proportion to their number of LPs...
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP, plus the node's proportional share of the elimination array)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : blind +1 to deal with rounding, it will skew results but only slightly
+ fe_array_count[index] = number_logical_processors_in_numa_node + (ea_size_in_freelist_elements * number_logical_processors_in_numa_node) / number_logical_processors + 1;
+ fe_array_pointers[index] = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct lfds710_freelist_element) * fe_array_count[index], LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ index++;
+ }
+
+ while( finished_flag == LOWERED )
+ {
+ for( loop = 0 ; loop < index ; loop++ )
+ if( fe_array_count[loop] > 0 )
+ lfds710_freelist_push( fs, &fe_array_pointers[loop][ --fe_array_count[loop] ], &psts );
+
+ finished_flag = RAISED;
+
+ for( loop = 0 ; loop < index ; loop++ )
+ if( fe_array_count[loop] > 0 )
+ finished_flag = LOWERED;
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &ptbs->psts, random_value );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state in the NUMA node with most threads from the current set
+ or, if equal threads, with lowest NUMA
+ all elements alloced from that node as well
+
+ SO much easier to figure out allocs than with NUMA OMG
+ all of this code needs rewriting
+ and the NUMA-but-not-used stuff is interesting but I don't think it carries its own weight
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_freelist_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ea = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_freelist_init_valid_on_current_logical_core( fs, ea, smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+
+ // TRD : fill the elimination array and have one element per thread in the freelist proper
+ number_freelist_elements = (smallest_power_of_two_larger_than_or_equal_to_number_logical_processors * number_freelist_element_pointers_per_atomic_isolation) + number_logical_processors;
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_freelist_element) * number_freelist_elements, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_freelist_elements ; loop++ )
+ lfds710_freelist_push( fs, &fe[loop], &psts );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &ptbs->psts, random_value );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->fs = fs;
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct lfds710_freelist_state
+ *fs;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ fs = obs->fs;
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ lfds710_freelist_pop( fs, &fe, &ptbs->psts );
+ lfds710_freelist_push( fs, fe, &ptbs->psts );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ lfds710_freelist_cleanup( obs->fs, NULL );
+
+ return;
+}
+
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_msvc_spinlock_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_msvc_spinlock_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_msvc_spinlock_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist from the NUMA node with most processors from the current set
+ or, if equal threads, with lowest NUMA
+ iterate over the NUMA node list
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, and give each one the freelist state as its user state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_msvc_spinlock_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_msvc_spinlock_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_msvc_spinlock_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state in the NUMA node with most threads from the current set
+ or, if equal threads, with lowest NUMA
+ all elements alloced from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_msvc_spinlock_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_msvc_spinlock_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_msvc_spinlock_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_msvc_spinlock_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_msvc_spinlock_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_msvc_spinlock_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_pthread_mutex_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_pthread_mutex_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_mutex_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_mutex_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_mutex_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist from the NUMA node with most processors from the current set
+ or, if equal threads, with lowest NUMA
+ iterate over the NUMA node list
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, and give each one the freelist state as its user state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_mutex_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_mutex_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_mutex_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state in the NUMA node with most threads from the current set
+ or, if equal threads, with lowest NUMA
+ all elements alloced from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_mutex_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_mutex_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_mutex_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
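+ // the single freelist is shared by every thread in the set; each thread obtains it at run time via this threadset-wide user state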
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_pthread_mutex_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_pthread_mutex_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
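+ /* timed loop : each iteration pops one element and immediately pushes it back, counting one operation;
+ the clock is re-read only once every TIME_LOOP_COUNT iterations, presumably to keep timer overhead out of the hot path
+ */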
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_pthread_mutex_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_pthread_mutex_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_pthread_mutex_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
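+ // record this thread's operation count against its logical processor (and, on Windows, its processor group)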
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_pthread_mutex_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
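+ // SMP mode : a single freelist, with its state and one element per logical processor, all allocated from the node with the most free space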
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_rwlock_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_rwlock_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_rwlock_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist state on the NUMA node with the most logical processors from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ then iterate over the NUMA node list;
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, giving each one its per-thread benchmark state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_rwlock_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_rwlock_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_rwlock_push( fs, &fe[loop] );
+ }
+
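+ // the per-thread benchmark state (the operation counter) is allocated from the busiest node found above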
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : place the freelist state in the NUMA node with the most threads from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ all elements are allocated from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_rwlock_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_rwlock_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_rwlock_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_pthread_rwlock_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_pthread_rwlock_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
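+ // store barrier plus forced store, so the final operation count is visible to the thread which gathers the results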
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_pthread_rwlock_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist state on the NUMA node with the most logical processors from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ then iterate over the NUMA node list;
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, giving each one its per-thread benchmark state
+ */
+
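+ // first pass : count this set's logical processors on each NUMA node, remembering the node with the most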
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : place the freelist state in the NUMA node with the most threads from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ all elements are allocated from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
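+ // wait until every thread in the set is ready, so the timed loops start (approximately) together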
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_private_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
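+
+/* benchmark : freelist guarded by a process-shared pthread spinlock;
+ each operation pops one element and pushes it straight back
+*/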
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist state on the NUMA node with the most logical processors from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ then iterate over the NUMA node list;
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, giving each one its per-thread benchmark state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : place the freelist state in the NUMA node with the most threads from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ all elements are allocated from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_windows_critical_section_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_windows_critical_section_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_windows_critical_section_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_windows_critical_section_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_windows_critical_section_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist state on the NUMA node with the most logical processors from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ then iterate over the NUMA node list;
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, giving each one its per-thread benchmark state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_windows_critical_section_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
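+ // allocate this node's share of elements from the node itself, so the element memory starts out local to the threads running there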
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_critical_section_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_windows_critical_section_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : place the freelist state in the NUMA node with the most threads from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ all elements are allocated from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_windows_critical_section_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_critical_section_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_windows_critical_section_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_windows_critical_section_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_windows_critical_section_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_windows_critical_section_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_windows_critical_section_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_windows_critical_section_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_windows_critical_section_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_freelist_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_freelist_windows_mutex_element
+ *fe;
+
+ struct libbenchmark_datastructure_freelist_windows_mutex_state
+ *fs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_thread, NULL );
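+ // registers the benchmark thread function to be run on every logical processor in the set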
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ fs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_windows_mutex_init( fs, NULL );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_freelist_windows_mutex_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_windows_mutex_push( fs, &fe[loop] );
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ /* TRD : init the freelist state on the NUMA node with the most logical processors from the current set
+ (or, if counts are equal, the lowest-numbered NUMA node)
+ then iterate over the NUMA node list;
+ for each NUMA node, allocate one freelist element per thread on that node
+ and push those elements onto the freelist
+
+ then loop over the threads, giving each one its per-thread benchmark state
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_windows_mutex_init( fs, NULL );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ fe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_mutex_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_freelist_windows_mutex_push( fs, &fe[loop] );
+ }
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ /* TRD : freelist state goes in the NUMA node with the most threads from the current set
+          or, if counts are equal, in the node with the lowest NUMA node ID
+          all elements are allocated from that node as well
+ */
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ fs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_freelist_windows_mutex_init( fs, NULL );
+
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ fe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_freelist_windows_mutex_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ libbenchmark_datastructure_freelist_windows_mutex_push( fs, &fe[loop] );
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
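+ // the freelist state becomes the threadset-wide user state, which each benchmark thread
+ // later retrieves via LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE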
+ tsets->users_threadset_state = fs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_freelist_windows_mutex_state
+ *fs;
+
+ struct libbenchmark_datastructure_freelist_windows_mutex_element
+ *fe;
+
+ struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ fs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
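+ // the measured loop : each iteration pops one element and pushes it straight back,
+ // counting operations; the high-resolution clock is read only once every TIME_LOOP_COUNT
+ // iterations, keeping timer overhead out of the measurement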
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_freelist_windows_mutex_pop( fs, &fe );
+ libbenchmark_datastructure_freelist_windows_mutex_push( fs, fe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_freelist_windows_mutex_state
+ *fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST,
+ LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ fs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_freelist_windows_mutex_cleanup( fs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
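+      // one element more than there are logical processors is allocated; element 0 is
+      // consumed by init as the queue's dummy element and the remainder are enqueued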
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate that many elements (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
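+ // each iteration dequeues one element and immediately re-enqueues it, so the queue never
+ // empties; the clock is sampled only once every TIME_LOOP_COUNT operations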
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate that many elements (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_overall_benchmark_state
+{
+ struct lfds700_queue_state
+ *qs;
+};
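+/* unlike the lock-based variants, which store the data structure state directly as the
+   threadset user state, this benchmark wraps the lfds700 queue state pointer in an
+   overall benchmark state structure and stores that instead
+*/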
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds700_misc_prng_state
+ ps;
+
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_queue_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
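+ // the lfds700 API takes a PRNG state (used internally by the library) on init, enqueue
+ // and dequeue; one is initialized here for the init-time enqueues, and each benchmark
+ // thread later initializes its own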
+ lfds700_misc_prng_init( &ps );
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds700_queue_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds700_queue_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_queue_init_valid_on_current_logical_core( qs, &qe[0], &ps, NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ lfds700_queue_enqueue( qs, &qe[loop], &ps );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_queue_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_queue_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_queue_init_valid_on_current_logical_core( qs, qe, &ps, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct lfds700_queue_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ lfds700_queue_enqueue( qs, &qe[loop], &ps );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_queue_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_queue_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds700_queue_init_valid_on_current_logical_core( qs, qe, &ps, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate that many elements (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds700_queue_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ lfds700_queue_enqueue( qs, &qe[loop], &ps );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ obs->qs = qs;
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ struct lfds700_misc_prng_state
+ ps;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct lfds700_queue_element
+ *qe;
+
+ struct lfds700_queue_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ qs = obs->qs;
+
+ lfds700_misc_prng_init( &ps );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ lfds700_queue_dequeue( qs, &qe, &ps );
+ lfds700_queue_enqueue( qs, qe, &ps );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ lfds700_queue_cleanup( obs->qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_overall_benchmark_state
+{
+ struct lfds710_queue_umm_state
+ *qs;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct lfds710_queue_umm_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ obs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_overall_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_queue_umm_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_queue_umm_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_queue_umm_init_valid_on_current_logical_core( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ lfds710_queue_umm_enqueue( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+   largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+   largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_queue_umm_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_queue_umm_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_queue_umm_init_valid_on_current_logical_core( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+          the dummy element comes from the same node as the queue_umm state and has already been allocated
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+          and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct lfds710_queue_umm_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ lfds710_queue_umm_enqueue( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_queue_umm_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_queue_umm_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_queue_umm_init_valid_on_current_logical_core( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct lfds710_queue_umm_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ lfds710_queue_umm_enqueue( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ } break;
+ }
+
+ obs->qs = qs;
+ tsets->users_threadset_state = obs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct lfds710_queue_umm_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
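+ /* note : the load barrier ensures this thread sees the stores made by the logical core
+ which performed queue and per-thread state initialisation
+ */
+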
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ obs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+ qs = obs->qs;
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
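+ /* note : benchmark loop - each iteration dequeues an element and immediately re-enqueues it,
+ so every iteration is exactly one dequeue and one enqueue; the high-resolution timer
+ is read only once every TIME_LOOP_COUNT iterations, keeping timer overhead out of the
+ measured work as far as possible
+ */
+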
+ while( current_time < end_time )
+ {
+ lfds710_queue_umm_dequeue( qs, &qe );
+ lfds710_queue_umm_enqueue( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
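+ /* note : the store barrier and forced store make the final operation count visible to the
+ cleanup code, which reads it from another logical core
+ */
+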
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_overall_benchmark_state
+ *obs;
+
+ struct libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
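+ /* note : one result is recorded per thread - its operation count, keyed by data structure,
+ benchmark, lock implementation, NUMA mode, logical processor set and the thread's
+ logical processor and Windows processor group
+ */
+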
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ obs = tsets->users_threadset_state;
+
+ lfds710_queue_umm_cleanup( obs->qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
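+ /* note : number_logical_processors+1 elements are allocated; qe[0] is passed to init as the
+ queue's dummy element and the remaining elements are enqueued, one per logical
+ processor in the set
+ */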
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ } break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_msvc_spinlock_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_mutex_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_mutex_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_mutex_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_mutex_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_mutex_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_mutex_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_mutex_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ } break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_pthread_mutex_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_pthread_mutex_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_pthread_mutex_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ } break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
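+ /* note : every queue operation here is a writer, so assuming the underlying implementation
+ takes the write lock for both enqueue and dequeue, the rwlock cannot provide any
+ reader concurrency in this benchmark
+ */
+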
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_pthread_rwlock_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
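+ /* TRD : benchmark loop; dequeue one element and immediately re-enqueue it, as fast as possible,
+ until the wall-clock deadline is reached; the high-resolution timer is only sampled once
+ per TIME_LOOP_COUNT iterations, so the cost of reading the clock stays out of the measured work
+ */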
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
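+
+ /* TRD : worked example with a hypothetical topology, purely for illustration : two NUMA nodes,
+ LPs 0-3 on node 0, LPs 4-7 on node 1, and a thread set using LPs 0, 1 and 4;
+ the loop below then allocates two elements from node 0 and one element from node 1,
+ in addition to the dummy element already allocated alongside the queue_umm state
+ */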
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_windows_critical_section_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_windows_critical_section_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_windows_critical_section_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_windows_critical_section_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_windows_critical_section_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_critical_section_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_windows_critical_section_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_windows_critical_section_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_windows_critical_section_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_windows_critical_section_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarks_queue_umm_internal.h"
+
+/***** structs *****/
+struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+{
+ lfds710_pal_uint_t
+ operation_count;
+};
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_init( struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_threadset_state *tsets )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ number_logical_processors_in_numa_node,
+ largest_number_logical_processors_in_numa_node = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_lp = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_state
+ *qs = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns,
+ *largest_pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *numa_node_for_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ libbenchmark_threadset_init( tsets, ts, logical_processor_set, ms, libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_thread, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ qs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+ qe = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_windows_mutex_init( qs, &qe[0], NULL );
+ for( loop = 1 ; loop < (number_logical_processors+1) ; loop++ )
+ libbenchmark_datastructure_queue_umm_windows_mutex_enqueue_umm( qs, &qe[loop] );
+ // TRD : now the per-thread states
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_windows_mutex_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ qe = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_windows_mutex_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ lfds710_list_aso_query( logical_processor_set, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_logical_processors );
+
+ // TRD : get the NUMA node for the queue_umm state
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ if( number_logical_processors_in_numa_node > largest_number_logical_processors_in_numa_node )
+ {
+ largest_number_logical_processors_in_numa_node = number_logical_processors_in_numa_node;
+ largest_pns = pns;
+ }
+ }
+
+ qs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_element) * (number_logical_processors+1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_datastructure_queue_umm_windows_mutex_init( qs, qe, NULL );
+
+ /* TRD : for each NUMA node, alloc one element per thread in that NUMA node (from the current thread set)
+ the dummy element comes from the same node as the queue_umm state and has already been done
+ */
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+ pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ /* TRD : for each NUMA node, figure out how many LPs in the current set are in that NUMA node
+ and then allocate the correct number of elements from this NUMA node (1 per LP)
+ */
+
+ lasue_lp = NULL;
+ number_logical_processors_in_numa_node = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue_lp) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lp );
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, pts->tns_lp, &numa_node_for_lp );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*numa_node_for_lp) == pns->numa_node_id )
+ number_logical_processors_in_numa_node++;
+ }
+
+ // TRD : everything allocates from the queue_umm state NUMA node
+ qe = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_datastructure_queue_umm_windows_mutex_element) * number_logical_processors_in_numa_node, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ for( loop = 0 ; loop < number_logical_processors_in_numa_node ; loop++ )
+ libbenchmark_datastructure_queue_umm_windows_mutex_enqueue_umm( qs, &qe[loop] );
+ }
+
+ // TRD : now the per-thread states
+
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ ptbs = libshared_memory_alloc_from_specific_node( ms, largest_pns->numa_node_id, sizeof(struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts->users_per_thread_state = ptbs;
+ }
+ break;
+ }
+
+ tsets->users_threadset_state = qs;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_thread( void *libbenchmark_threadset_per_thread_state )
+{
+ int long long unsigned
+ current_time = 0,
+ end_time,
+ time_units_per_second;
+
+ lfds710_pal_uint_t
+ operation_count = 0,
+ time_loop = 0;
+
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_element
+ *qe;
+
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_state
+ *qs;
+
+ struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libbenchmark_threadset_per_thread_state != NULL );
+
+ pts = (struct libbenchmark_threadset_per_thread_state *) libbenchmark_threadset_per_thread_state;
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+ qs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_OVERALL_STATE( *pts );
+
+ LIBBENCHMARK_PAL_TIME_UNITS_PER_SECOND( &time_units_per_second );
+
+ libbenchmark_threadset_thread_ready_and_wait( pts );
+
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+
+ end_time = current_time + time_units_per_second * libbenchmark_globals_benchmark_duration_in_seconds;
+
+ while( current_time < end_time )
+ {
+ libbenchmark_datastructure_queue_umm_windows_mutex_dequeue_umm( qs, &qe );
+ libbenchmark_datastructure_queue_umm_windows_mutex_enqueue_umm( qs, qe );
+ operation_count++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ LIBBENCHMARK_PAL_GET_HIGHRES_TIME( &current_time );
+ }
+ }
+
+ ptbs->operation_count = operation_count;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_cleanup( struct lfds710_list_aso_state *logical_processor_set,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_results_state *rs,
+ struct libbenchmark_threadset_state *tsets )
+{
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_state
+ *qs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_per_thread_benchmark_state
+ *ptbs;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ ptbs = LIBBENCHMARK_THREADSET_PER_THREAD_STATE_GET_USERS_PER_THREAD_STATE( *pts );
+
+ libbenchmark_results_put_result( rs,
+ LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM,
+ LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1,
+ LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX,
+ numa_mode,
+ logical_processor_set,
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp ),
+ LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp ),
+ ptbs->operation_count );
+ }
+
+ qs = tsets->users_threadset_state;
+
+ libbenchmark_datastructure_queue_umm_windows_mutex_cleanup( qs, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarkset_add_benchmark( struct libbenchmark_benchmarkset_state *bss, struct libbenchmark_benchmarkinstance_state *bs )
+{
+ LFDS710_PAL_ASSERT( bss != NULL );
+ LFDS710_PAL_ASSERT( bs != NULL );
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( bs->lasue, bs );
+ lfds710_list_asu_insert_at_end( &bss->benchmarks, &bs->lasue );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libbenchmark_benchmarkset_cleanup( struct libbenchmark_benchmarkset_state *bsets )
+{
+ LFDS710_PAL_ASSERT( bsets != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarkset_gnuplot_emit( struct libbenchmark_benchmarkset_state *bsets,
+ struct libbenchmark_results_state *rs,
+ char *gnuplot_system_string,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct libbenchmark_gnuplot_options *gpo,
+ struct libbenchmark_benchmarkset_gnuplot *bg )
+{
+ char
+ png_filename[512],
+ temp_string[64],
+ *topology_string;
+
+ char const
+ *libbenchmark_version_and_build_string,
+ *liblfds_version_and_build_string,
+ *libshared_version_and_build_string;
+
+ enum flag
+ found_flag;
+
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ count,
+ greatest_digit = 0,
+ greatest_result = 0,
+ length,
+ loop,
+ longest_line_length = 0,
+ result,
+ second_greatest_digit,
+ temp_greatest_result,
+ title_line_lengths[5],
+ y_max = 0, // TRD : to remove compiler warning
+ number_logical_cores,
+ number_benchmarks,
+ number_lp_sets,
+ topology_string_length,
+ one_block_xticks,
+ one_block_plot,
+ one_block_titles,
+ one_block_numeric_data,
+ one_inner_block,
+ one_outer_block,
+ one_block,
+ total_length_in_bytes;
+
+ struct libbenchmark_benchmarkinstance_state
+ *bs;
+
+ struct lfds710_btree_au_element
+ *baue,
+ *baue_inner,
+ *baue_temp;
+
+ struct lfds710_list_asu_element
+ *lasue_benchmarks,
+ *lasue_benchmarks_outer,
+ *lasue_lpset,
+ *lasue_temp;
+
+ struct lfds710_list_aso_element
+ *lasoe,
+ *lasoe_inner;
+
+ struct lfds710_list_aso_state
+ *logical_processor_set;
+
+ struct libbenchmark_topology_node_state
+ *tns,
+ *tns_results,
+ *tns_inner = NULL; // TRD : to remove compiler warning
+
+ LFDS710_PAL_ASSERT( bsets != NULL );
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( gnuplot_system_string != NULL );
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( gpo != NULL );
+ LFDS710_PAL_ASSERT( bg != NULL );
+
+ bg->datastructure_id = bsets->datastructure_id;
+ bg->benchmark_id = bsets->benchmark_id;
+
+ /* TRD : so, first, we're producing a string
+ so we need to figure out how much store to allocate
+
+ roughly, length is;
+
+ topology string length
+ fixed = 4096
+ one_block_xticks = 64 + 6 * number logical cores
+ one_block_plot = 16 + 40 * number_benchmarks
+ one_block_titles = 32 * number_benchmarks
+ one_block_numeric_data = (16 + number_benchmarks) * number_benchmarks * number logical cores
+ one_inner_block = one_block_titles + one_block_numeric_data + 2
+ one_outer_block = one_block_xticks + one_block_plot
+ one_block = one_inner_block * number_benchmarks + one_outer_block
+
+ (plus one on number_lp_sets for the blank key-only chart)
+ total = topology_string_length + fixed + one_block * (number_lp_sets+1)
+ */
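+
+ /* TRD : worked example with hypothetical counts, purely for illustration :
+ with 4 logical cores, 3 benchmarks and 2 logical processor sets;
+ one_block_xticks = 64 + 6*4 = 88
+ one_block_plot = 16 + 40*3 = 136
+ one_block_titles = 32*3 = 96
+ one_block_numeric_data = (16+3) * 3 * 4 = 228
+ one_inner_block = 96 + 228 + 2 = 326
+ one_outer_block = 88 + 136 = 224
+ one_block = 326*3 + 224 = 1202
+ total = topology_string_length + 4096 + 1202 * (2+1)
+ */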
+
+ topology_string = libbenchmark_topology_generate_string( bsets->ts, bsets->ms, LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_GNUPLOT );
+
+ libbenchmark_topology_query( bsets->ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR, &number_logical_cores );
+ lfds710_list_asu_query( &bsets->benchmarks, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, &number_benchmarks );
+ lfds710_list_asu_query( bsets->logical_processor_sets, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_lp_sets );
+
+ topology_string_length = libshared_ansi_strlen( topology_string );
+ one_block_xticks = 64 + 6 * number_logical_cores;
+ one_block_plot = 16 + 40 * number_benchmarks;
+ one_block_titles = 32 * number_benchmarks;
+ one_block_numeric_data = (16 + number_benchmarks) * number_benchmarks * number_logical_cores;
+ one_inner_block = one_block_titles + one_block_numeric_data + 2;
+ one_outer_block = one_block_xticks + one_block_plot;
+ one_block = one_inner_block * number_benchmarks + one_outer_block;
+ total_length_in_bytes = topology_string_length + 4096 + one_block * (number_lp_sets+1);
+
+ bg->gnuplot_string = libshared_memory_alloc_from_most_free_space_node( bsets->ms, total_length_in_bytes, sizeof(char) );
+
+ lfds710_misc_query( LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &liblfds_version_and_build_string );
+ libbenchmark_misc_query( LIBBENCHMARK_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &libbenchmark_version_and_build_string );
+ libshared_misc_query( LIBSHARED_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &libshared_version_and_build_string );
+
+ libshared_ansi_strcpy( bg->filename, "liblfds" );
+ libshared_ansi_strcat_number( bg->filename, (lfds710_pal_uint_t) LFDS710_MISC_VERSION_INTEGER );
+ libshared_ansi_strcat( bg->filename, "_" );
+ libshared_ansi_strcat( bg->filename, libbenchmark_globals_datastructure_names[bsets->datastructure_id] );
+ libshared_ansi_strcat( bg->filename, "_" );
+ libshared_ansi_strcat( bg->filename, libbenchmark_globals_benchmark_names[bsets->benchmark_id] );
+ libshared_ansi_strcat( bg->filename, "_" );
+ libshared_ansi_strcat( bg->filename, libbenchmark_globals_numa_mode_names[numa_mode] );
+ libshared_ansi_strcat( bg->filename, "_" );
+ libshared_ansi_strcat( bg->filename, gnuplot_system_string );
+ libshared_ansi_strcat( bg->filename, ".gnuplot" );
+
+ libshared_ansi_strcpy( png_filename, "liblfds" );
+ libshared_ansi_strcat_number( png_filename, (lfds710_pal_uint_t) LFDS710_MISC_VERSION_INTEGER );
+ libshared_ansi_strcat( png_filename, "_" );
+ libshared_ansi_strcat( png_filename, libbenchmark_globals_datastructure_names[bsets->datastructure_id] );
+ libshared_ansi_strcat( png_filename, "_" );
+ libshared_ansi_strcat( png_filename, libbenchmark_globals_benchmark_names[bsets->benchmark_id] );
+ libshared_ansi_strcat( png_filename, "_" );
+ libshared_ansi_strcat( png_filename, libbenchmark_globals_numa_mode_names[numa_mode] );
+ libshared_ansi_strcat( png_filename, "_" );
+ libshared_ansi_strcat( png_filename, gnuplot_system_string );
+ libshared_ansi_strcat( png_filename, ".png" );
+
+ // TRD : now for main gnuplot header
+ libshared_ansi_strcpy( bg->gnuplot_string, "set output \"" );
+ libshared_ansi_strcat( bg->gnuplot_string, png_filename );
+ libshared_ansi_strcat( bg->gnuplot_string, "\"\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "set terminal pngcairo enhanced font \"Courier New,14\" size " );
+
+ if( gpo->width_in_pixels_set_flag == RAISED )
+ libshared_ansi_strcat_number( bg->gnuplot_string, gpo->width_in_pixels );
+
+ // TRD : 300px wide per logical core
+ if( gpo->width_in_pixels_set_flag == LOWERED )
+ libshared_ansi_strcat_number( bg->gnuplot_string, number_logical_cores * 300 );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "," );
+
+ // TRD : height is 300 pixels per chart, plus 300 for the title, plus 300 for the key
+ if( gpo->height_in_pixels_set_flag == RAISED )
+ libshared_ansi_strcat_number( bg->gnuplot_string, gpo->height_in_pixels );
+
+ if( gpo->height_in_pixels_set_flag == LOWERED )
+ libshared_ansi_strcat_number( bg->gnuplot_string, (number_lp_sets+2) * 300 );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\n" );
+ libshared_ansi_strcat( bg->gnuplot_string, "set multiplot title \"" );
+
+ // TRD : compute longest line in first header part so we can know how many spaces needed for right padding on each line
+ title_line_lengths[0] = libshared_ansi_strlen( "data structure : " ) + libshared_ansi_strlen( libbenchmark_globals_datastructure_names[bsets->datastructure_id] );
+ title_line_lengths[1] = libshared_ansi_strlen( "benchmark : " ) + libshared_ansi_strlen( libbenchmark_globals_benchmark_names[bsets->benchmark_id] );
+ title_line_lengths[2] = libshared_ansi_strlen( "numa mode : " ) + libshared_ansi_strlen( libbenchmark_globals_numa_mode_names[numa_mode] );
+ temp_string[0] = '\0';
+ libshared_ansi_strcat_number( temp_string, libbenchmark_globals_benchmark_duration_in_seconds );
+ title_line_lengths[3] = libshared_ansi_strlen( "duration : second(s)" ) + libshared_ansi_strlen( temp_string );
+ title_line_lengths[4] = libshared_ansi_strlen( "system : " ) + libshared_ansi_strlen( gnuplot_system_string );
+
+ for( loop = 0 ; loop < 5 ; loop++ )
+ if( title_line_lengths[loop] > longest_line_length )
+ longest_line_length = title_line_lengths[loop];
+
+ // TRD : now emit
+ libshared_ansi_strcat( bg->gnuplot_string, "data structure : " );
+ libshared_ansi_strcat( bg->gnuplot_string, libbenchmark_globals_datastructure_names[bsets->datastructure_id] );
+ for( loop = 0 ; loop < longest_line_length - title_line_lengths[0] ; loop++ )
+ libshared_ansi_strcat( bg->gnuplot_string, " " );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "benchmark : " );
+ libshared_ansi_strcat( bg->gnuplot_string, libbenchmark_globals_benchmark_names[bsets->benchmark_id] );
+ for( loop = 0 ; loop < longest_line_length - title_line_lengths[1] ; loop++ )
+ libshared_ansi_strcat( bg->gnuplot_string, " " );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "numa mode : " );
+ libshared_ansi_strcat( bg->gnuplot_string, libbenchmark_globals_numa_mode_names[numa_mode] );
+ for( loop = 0 ; loop < longest_line_length - title_line_lengths[2] ; loop++ )
+ libshared_ansi_strcat( bg->gnuplot_string, " " );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "duration : " );
+ temp_string[0] = '\0';
+ libshared_ansi_strcat_number( bg->gnuplot_string, libbenchmark_globals_benchmark_duration_in_seconds );
+ libshared_ansi_strcat( bg->gnuplot_string, " second(s)" );
+ for( loop = 0 ; loop < longest_line_length - title_line_lengths[3] ; loop++ )
+ libshared_ansi_strcat( bg->gnuplot_string, " " );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "system : " );
+ libshared_ansi_strcat( bg->gnuplot_string, gnuplot_system_string );
+ for( loop = 0 ; loop < longest_line_length - title_line_lengths[4] ; loop++ )
+ libshared_ansi_strcat( bg->gnuplot_string, " " );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+ libshared_ansi_strcat( bg->gnuplot_string, topology_string );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "Y axis = ops/sec, X axis = logical cores in use\\n\\\n" );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+ libshared_ansi_strcat( bg->gnuplot_string, (char *) liblfds_version_and_build_string );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+ libshared_ansi_strcat( bg->gnuplot_string, (char *) libshared_version_and_build_string );
+ libshared_ansi_strcat( bg->gnuplot_string, "\\n\\\n" );
+ libshared_ansi_strcat( bg->gnuplot_string, (char *) libbenchmark_version_and_build_string );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\" layout " );
+ // TRD : +1 for key-only chart
+ libshared_ansi_strcat_number( bg->gnuplot_string, number_lp_sets+1 );
+ libshared_ansi_strcat( bg->gnuplot_string, ",1 rowsfirst noenhanced\n"
+ "set format y \"%.0f\"\n"
+ "set boxwidth 1 absolute\n"
+ "set style data histograms\n"
+ "set style histogram cluster\n"
+ "set style histogram gap 4\n"
+ "set style fill solid border -1\n"
+ "set key autotitle columnheader center top\n"
+ "set noxtics\n"
+ "set noytics\n"
+ "set noborder\n"
+ "set noxlabel\n"
+ "set noylabel\n" );
+
+ /* TRD : we're drawing the plot for benchmarks in one set (i.e. a single logical benchmark, but all the different lock type variants of it)
+ over all of its logical processor sets
+ so we have one chart per logical processor set
+ */
+
+ /* TRD : first, we need to compute the y range
+ this itself requires us to iterate over the result for every core in every test :-)
+ */
+
+ // TRD : loop over every logical processor set
+ lasue_lpset = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*bsets->logical_processor_sets,lasue_lpset) )
+ {
+ logical_processor_set = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lpset );
+
+ // TRD : now loop over every benchmark
+ lasue_benchmarks = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ bs = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_benchmarks );
+
+ // TRD : now for this processor set, loop over every logical core
+ lasoe = NULL;
+
+ while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*logical_processor_set, lasoe) )
+ {
+ tns_results = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ // TRD : now, finally, let's go shopping
+ libbenchmark_results_get_result( rs,
+ bsets->datastructure_id,
+ bsets->benchmark_id,
+ bs->lock_id,
+ numa_mode,
+ logical_processor_set,
+ tns_results,
+ &result );
+
+ if( result > greatest_result )
+ greatest_result = result;
+ }
+ }
+ }
+
+ if( gpo->y_axis_scale_type == LIBBENCHMARK_GNUPLOT_Y_AXIS_SCALE_TYPE_LINEAR )
+ libshared_ansi_strcat( bg->gnuplot_string, "set yrange [0:" );
+
+ if( gpo->y_axis_scale_type == LIBBENCHMARK_GNUPLOT_Y_AXIS_SCALE_TYPE_LOGARITHMIC )
+ libshared_ansi_strcat( bg->gnuplot_string, "set logscale y\n"
+ "set yrange [1:" );
+
+ /* TRD : for y-range max, we look at the second greatest digit in greatest_result
+ i.e. for 3429111 the second greatest digit is 4
+ if that digit is 0 to 4, inclusive, the y-max value is that digit converted to 5, and everything to the right (smaller values to 0)
+ if that digit is 5 to 9, inclusive, the y-max value is that digit and everything to the right converted to 0, and the greatest digit increased by 1
+ I am assuming I will not exceed a 32-bit unsigned max :-)
+ if the greatest_result is a single digit, we set second_greatest_digit to greatest_digit and greatest_digit to 0, and it works properly
+ */
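+
+ /* TRD : a worked example with invented figures : greatest_result 3429111 over a 1 second duration
+ the loop below leaves greatest_digit = 3, second_greatest_digit = 4 and length = 7
+ 4 is less than 5, so y_max = 3*10 + 5 = 35, and five multiplications by 10 give a y-range max of 3500000
+ had the result been 3929111 instead, the 9 would round the 3 up, giving y_max = 40 and so 4000000
+ */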
+
+ temp_greatest_result = greatest_result / libbenchmark_globals_benchmark_duration_in_seconds;
+ length = 0;
+
+ do
+ {
+ second_greatest_digit = greatest_digit;
+ greatest_digit = temp_greatest_result % 10;
+ length++;
+ temp_greatest_result -= greatest_digit;
+ temp_greatest_result /= 10;
+ }
+ while( temp_greatest_result > 0 );
+
+ if( length == 1 )
+ {
+ second_greatest_digit = greatest_digit;
+ greatest_digit = 0;
+ }
+
+ if( second_greatest_digit < 5 )
+ y_max = greatest_digit * 10 + 5;
+
+ if( second_greatest_digit >= 5 )
+ y_max = (greatest_digit+1) * 10;
+
+ if( length >= 2 )
+ for( loop = 0 ; loop < length-2 ; loop++ )
+ y_max *= 10;
+
+ libshared_ansi_strcat_number( bg->gnuplot_string, y_max );
+ libshared_ansi_strcat( bg->gnuplot_string, "]\n" );
+
+ // TRD : now print one empty chart which is just for the key
+
+ libshared_ansi_strcat( bg->gnuplot_string, "plot " );
+
+ lasue_benchmarks = NULL;
+ count = 1;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ lasue_temp = lasue_benchmarks;
+ lasue_temp = LFDS710_LIST_ASU_GET_NEXT( *lasue_temp );
+
+ libshared_ansi_strcat( bg->gnuplot_string, " '-' using " );
+ libshared_ansi_strcat_number( bg->gnuplot_string, count++ );
+ libshared_ansi_strcat( bg->gnuplot_string, lasue_temp != NULL ? ", \\\n" : "\n" );
+ }
+
+ /* TRD : simpler output for the key-only chart
+ for each benchmark
+ print the title (name of all benchmarks)
+ for number of lp sets
+ print "0 " for number of benchmarks
+ print "e"
+
+ then print the following, to set up for the real charts;
+ set key off
+ set border
+ set ytics mirror
+ */
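+
+ /* TRD : illustrative only (lock names and counts assumed) - with two benchmarks and two LP sets the key-only chart comes out roughly as;
+
+ plot  '-' using 1,  '-' using 2    (the real output splits this across lines, with a trailing backslash continuation)
+ "gcc spinlock (atomic)" "liblfds710 lockfree"
+ 0 0
+ 0 0
+ e
+ "gcc spinlock (atomic)" "liblfds710 lockfree"
+ 0 0
+ 0 0
+ e
+ */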
+
+ lasue_benchmarks_outer = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks_outer) )
+ {
+ // TRD : now loop over every benchmark and print its lock name, in quotes
+ lasue_benchmarks = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ bs = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_benchmarks );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\"" );
+ libshared_ansi_strcat( bg->gnuplot_string, libbenchmark_globals_lock_names[bs->lock_id] );
+ libshared_ansi_strcat( bg->gnuplot_string, "\" " );
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\n" );
+
+ // TRD : loop over every logical processor set
+ lasue_lpset = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*bsets->logical_processor_sets,lasue_lpset) )
+ {
+ logical_processor_set = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lpset );
+
+ // TRD : now loop over every benchmark and print a zero for each - the key-only chart carries no data
+ lasue_benchmarks = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ libshared_ansi_strcat( bg->gnuplot_string, "0 " );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\n" );
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, "e\n" );
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, "set key off\n"
+ "set border\n"
+ "set ytics mirror\n" );
+
+ // TRD : now repeat, this time emitting actual charts
+
+ // TRD : loop over every logical processor set
+ lasue_lpset = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*bsets->logical_processor_sets,lasue_lpset) )
+ {
+ logical_processor_set = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lpset );
+
+ /* TRD : emit the chart header; xtics and plot
+ to get the display order right
+ we need to loop over every logical processor in the topology
+ we then check each LP to see if it's in the lpset
+ if so, we print the LP number, otherwise a "-"
+ */
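+
+ /* TRD : illustrative only (LP numbering assumed; the walk runs from the largest element in the topology tree downwards)
+ on a four LP machine where the current set holds LPs 0 and 1, the emitted line would look like;
+
+ set xtics 1 out nomirror ( "-" 0, "-" 1, "1" 2, "0" 3 )
+ */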
+
+ libshared_ansi_strcat( bg->gnuplot_string, "set xtics 1 out nomirror ( " );
+
+ baue = NULL;
+ count = 0;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&bsets->ts->topology_tree, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ /* TRD : now for this processor set, loop over every logical core and see if lp is in this set
+ if in set, and processor_group is not set, print the processor number, or "-"
+ if in set, and processor_group is set, print "processor number/group number", or "-"
+ */
+
+ lasoe_inner = NULL;
+ found_flag = LOWERED;
+
+ while( found_flag == LOWERED and LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*logical_processor_set, lasoe_inner) )
+ {
+ tns_inner = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_inner );
+
+ if( 0 == libbenchmark_topology_node_compare_nodes_function(tns, tns_inner) )
+ found_flag = RAISED;
+ }
+
+ /* TRD : check to see if we're the last element - if so, no trailing comma
+ the final LP is always the smallest element in the tree, so it's always the last element we visit in this walk
+ */
+ baue_temp = baue;
+ lfds710_btree_au_get_by_relative_position( &baue_temp, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE );
+ libshared_ansi_strcat( bg->gnuplot_string, "\"" );
+
+ if( found_flag == RAISED )
+ {
+ libshared_ansi_strcat_number( bg->gnuplot_string, LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER(*tns_inner) );
+
+ if( LIBBENCHMARK_TOPOLOGY_NODE_IS_WINDOWS_GROUP_NUMBER(*tns_inner) )
+ {
+ libshared_ansi_strcat( bg->gnuplot_string, "/" );
+ libshared_ansi_strcat_number( bg->gnuplot_string, LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER(*tns_inner) );
+ }
+ }
+
+ if( found_flag == LOWERED )
+ libshared_ansi_strcat( bg->gnuplot_string, "-" );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\" " );
+ libshared_ansi_strcat_number( bg->gnuplot_string, count++ );
+ libshared_ansi_strcat( bg->gnuplot_string, baue_temp != NULL ? ", " : " " );
+ }
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, " )\n" );
+
+ /* TRD : now the plot command
+
+ christ, I need an API for this, to build up the plot in memory and then dump it out
+ this hardcoded in-line output is insane
+
+ we print one line per lock type (i.e. one per benchmark)
+ */
+
+ libshared_ansi_strcat( bg->gnuplot_string, "plot " );
+
+ // TRD : now loop over every benchmark
+ lasue_benchmarks = NULL;
+ count = 1;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ lasue_temp = lasue_benchmarks;
+ lasue_temp = LFDS710_LIST_ASU_GET_NEXT( *lasue_temp );
+
+ libshared_ansi_strcat( bg->gnuplot_string, " '-' using " );
+ libshared_ansi_strcat_number( bg->gnuplot_string, count++ );
+ libshared_ansi_strcat( bg->gnuplot_string, lasue_temp != NULL ? ", \\\n" : "\n" );
+ }
+
+ /* TRD : now for the results
+ we need to print 0s for the LPs not in the set
+ and only the topology knows about the LPs which are not in the set
+ so iterate over the LPs in the topology
+ for each LP in the topology
+ search the result set for it - if it's not there, we print a 0
+
+ we need to print these all once per benchmark/lock
+ */
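+
+ /* TRD : illustrative only (lock names and figures invented) - for a two LP set on a four LP machine, with two benchmarks, one data block looks roughly like;
+
+ "gcc spinlock (atomic)" "liblfds710 lockfree"
+ 0 0
+ 0 0
+ 1480000 2730000
+ 1500000 2750000
+ e
+ */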
+
+ lasue_benchmarks_outer = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks_outer) )
+ {
+ baue = NULL;
+
+ // TRD : now loop over every benchmark and print its lock name, in quotes
+ lasue_benchmarks = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ bs = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_benchmarks );
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\"" );
+ libshared_ansi_strcat( bg->gnuplot_string, libbenchmark_globals_lock_names[bs->lock_id] );
+ libshared_ansi_strcat( bg->gnuplot_string, "\" " );
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\n" );
+
+ baue_inner = NULL;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&bsets->ts->topology_tree, &baue_inner, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue_inner );
+
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ // TRD : so we have an LP - now loop over every benchmark, and print 0 if not found, result if found
+ lasue_benchmarks = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ bs = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_benchmarks );
+
+ // TRD : now, finally, let's go shopping
+ rv = libbenchmark_results_get_result( rs,
+ bsets->datastructure_id,
+ bsets->benchmark_id,
+ bs->lock_id,
+ numa_mode,
+ logical_processor_set,
+ tns,
+ &result );
+
+ if( rv == 0 )
+ libshared_ansi_strcat( bg->gnuplot_string, "0 " );
+
+ if( rv == 1 )
+ {
+ libshared_ansi_strcat_number( bg->gnuplot_string, result / libbenchmark_globals_benchmark_duration_in_seconds);
+ libshared_ansi_strcat( bg->gnuplot_string, " " );
+ }
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, "\n" );
+ }
+ }
+
+ libshared_ansi_strcat( bg->gnuplot_string, "e\n" );
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarkset_init( struct libbenchmark_benchmarkset_state *bsets,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ struct lfds710_list_asu_state *logical_processor_sets,
+ struct lfds710_list_asu_state *numa_modes_list,
+ struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms )
+{
+ LFDS710_PAL_ASSERT( bsets != NULL );
+ // TRD : datastructure_id can be any value in its range
+ // TRD : benchmark_id can be any value in its range
+ LFDS710_PAL_ASSERT( logical_processor_sets != NULL );
+ LFDS710_PAL_ASSERT( numa_modes_list != NULL );
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ bsets->datastructure_id = datastructure_id;
+ bsets->benchmark_id = benchmark_id;
+ lfds710_list_asu_init_valid_on_current_logical_core( &bsets->benchmarks, NULL );
+ bsets->logical_processor_sets = logical_processor_sets;
+ bsets->numa_modes_list = numa_modes_list;
+ bsets->ts = ts;
+ bsets->ms = ms;
+
+ return;
+}
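+
+ /* TRD : a minimal usage sketch, mirroring the calls made in libbenchmark_benchmarksuite_init
+ (bss, bs and ms are assumed to have been set up as in that function);
+
+ bsets = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkset_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkset_init( bsets, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, &bss->lpsets, &bss->numa_modes_list, bss->ts, ms );
+ libbenchmark_benchmarkset_add_benchmark( bsets, bs );
+ libbenchmark_benchmarksuite_add_benchmarkset( bss, bsets );
+ */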
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarkset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarkset_run( struct libbenchmark_benchmarkset_state *bsets, struct libbenchmark_results_state *rs )
+{
+ lfds710_pal_uint_t
+ number_numa_nodes;
+
+ struct libbenchmark_benchmarkinstance_state
+ *bs;
+
+ struct lfds710_list_asu_element
+ *lasue_benchmarks = NULL,
+ *lasue_lpset = NULL,
+ *lasue_numa = NULL;
+
+ struct lfds710_list_aso_state
+ *logical_processor_set;
+
+ struct libbenchmark_topology_numa_node
+ *numa_mode;
+
+ LFDS710_PAL_ASSERT( bsets != NULL );
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ libbenchmark_topology_query( bsets->ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA, &number_numa_nodes );
+
+ // TRD : loop over every logical processor set
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*bsets->logical_processor_sets,lasue_lpset) )
+ {
+ logical_processor_set = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_lpset );
+
+ // TRD : now for this logical processor set, execute all benchmarks
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bsets->benchmarks,lasue_benchmarks) )
+ {
+ bs = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_benchmarks );
+
+ // TRD : run each benchmark instance over each NUMA mode
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*bsets->numa_modes_list,lasue_numa) )
+ {
+ numa_mode = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_numa );
+ libbenchmark_benchmarkinstance_run( bs, logical_processor_set, numa_mode->mode, bsets->ms, rs );
+ }
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarksuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarksuite_add_benchmarkset( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_benchmarkset_state *bsets )
+{
+ LFDS710_PAL_ASSERT( bss != NULL );
+ LFDS710_PAL_ASSERT( bsets != NULL );
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( bsets->lasue, bsets );
+ lfds710_list_asu_insert_at_end( &bss->benchmarksets, &bsets->lasue );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarksuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libbenchmark_benchmarksuite_cleanup( struct libbenchmark_benchmarksuite_state *bss )
+{
+ LFDS710_PAL_ASSERT( bss != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarksuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarksuite_get_list_of_gnuplot_strings( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_results_state *rs,
+ char *gnuplot_system_string,
+ struct libbenchmark_gnuplot_options *gpo,
+ struct lfds710_list_asu_state *list_of_gnuplot_strings )
+{
+ struct libbenchmark_benchmarkset_state
+ *bsets;
+
+ struct libbenchmark_benchmarkset_gnuplot
+ *bg;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ *lasue_numa = NULL;
+
+ struct libbenchmark_topology_numa_node
+ *numa_mode;
+
+ LFDS710_PAL_ASSERT( bss != NULL );
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( gnuplot_system_string != NULL );
+ LFDS710_PAL_ASSERT( gpo != NULL );
+ LFDS710_PAL_ASSERT( list_of_gnuplot_strings != NULL );
+
+ lfds710_list_asu_init_valid_on_current_logical_core( list_of_gnuplot_strings, NULL );
+
+ // TRD : iterate over all benchmarksets
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bss->benchmarksets,lasue) )
+ {
+ bsets = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ // TRD : iterate over NUMA modes - separate gnuplot for each
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*bsets->numa_modes_list,lasue_numa) )
+ {
+ numa_mode = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue_numa );
+
+ bg = libshared_memory_alloc_from_most_free_space_node( bss->ms, sizeof(struct libbenchmark_benchmarkset_gnuplot), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ libbenchmark_benchmarkset_gnuplot_emit( bsets, rs, gnuplot_system_string, numa_mode->mode, gpo, bg );
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( bg->lasue, bg );
+ lfds710_list_asu_insert_at_end( list_of_gnuplot_strings, &bg->lasue );
+ }
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarksuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 )
+
+void libbenchmark_benchmarksuite_init( struct libbenchmark_benchmarksuite_state *bss,
+ struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ lfds710_pal_uint_t options_bitmask,
+ lfds710_pal_uint_t benchmark_duration_in_seconds )
+{
+ struct libbenchmark_benchmarkinstance_state
+ *bs;
+
+ struct libbenchmark_benchmarkset_state
+ *bsets_btree_au,
+ *bsets_freelist,
+ *bsets_queue_umm;
+
+ LFDS710_PAL_ASSERT( bss != NULL );
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP or numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA );
+ // TRD : options_bitmask is a bitmask and difficult to assert
+ // TRD : benchmark_duration_in_seconds can be any value in its range
+
+ bss->ts = ts;
+ bss->ms = ms;
+ libbenchmark_topology_generate_deduplicated_logical_processor_sets( bss->ts, ms, &bss->lpsets );
+ libbenchmark_topology_generate_numa_modes_list( bss->ts, numa_mode, ms, &bss->numa_modes_list );
+ lfds710_list_asu_init_valid_on_current_logical_core( &bss->benchmarksets, NULL );
+
+ if( options_bitmask & LIBBENCHMARK_BENCHMARKSUITE_OPTION_DURATION )
+ libbenchmark_globals_benchmark_duration_in_seconds = benchmark_duration_in_seconds;
+
+ // TRD : btree_au
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_CAS )
+ {
+ // TRD : btree_au set
+ bsets_btree_au = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkset_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkset_init( bsets_btree_au, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, &bss->lpsets, &bss->numa_modes_list, bss->ts, ms );
+ libbenchmark_benchmarksuite_add_benchmarkset( bss, bsets_btree_au );
+
+ if( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC, bss->ts, libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_init, libbenchmark_benchmark_btree_au_gcc_spinlock_atomic_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC, bss->ts, libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_init, libbenchmark_benchmark_btree_au_gcc_spinlock_sync_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE, bss->ts, libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_init, libbenchmark_benchmark_btree_au_liblfds700_lockfree_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE, bss->ts, libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_init, libbenchmark_benchmark_btree_au_liblfds710_lockfree_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+
+ if( LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK, bss->ts, libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_init, libbenchmark_benchmark_btree_au_msvc_spinlock_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX, bss->ts, libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_init, libbenchmark_benchmark_btree_au_pthread_mutex_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK, bss->ts, libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_init, libbenchmark_benchmark_btree_au_pthread_rwlock_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE, bss->ts, libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_init, libbenchmark_benchmark_btree_au_pthread_spinlock_process_private_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED, bss->ts, libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_init, libbenchmark_benchmark_btree_au_pthread_spinlock_process_shared_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION, bss->ts, libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_init, libbenchmark_benchmark_btree_au_windows_critical_section_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_BTREE_AU, LIBBENCHMARK_BENCHMARK_ID_READN_THEN_WRITEN, LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX, bss->ts, libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_init, libbenchmark_benchmark_btree_au_windows_mutex_readn_writen_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_btree_au, bs );
+ }
+ }
+
+ // TRD : freelist
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ // TRD : freelist set
+ bsets_freelist = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkset_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkset_init( bsets_freelist, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, &bss->lpsets, &bss->numa_modes_list, bss->ts, ms );
+ libbenchmark_benchmarksuite_add_benchmarkset( bss, bsets_freelist );
+
+ if( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC, bss->ts, libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_init, libbenchmark_benchmark_freelist_gcc_spinlock_atomic_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC, bss->ts, libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_init, libbenchmark_benchmark_freelist_gcc_spinlock_sync_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE, bss->ts, libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_init, libbenchmark_benchmark_freelist_liblfds700_lockfree_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE, bss->ts, libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_init, libbenchmark_benchmark_freelist_liblfds710_lockfree_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+
+ if( LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK, bss->ts, libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_init, libbenchmark_benchmark_freelist_msvc_spinlock_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX, bss->ts, libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_init, libbenchmark_benchmark_freelist_pthread_mutex_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK, bss->ts, libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_init, libbenchmark_benchmark_freelist_pthread_rwlock_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE, bss->ts, libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_init, libbenchmark_benchmark_freelist_pthread_spinlock_process_private_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED, bss->ts, libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_init, libbenchmark_benchmark_freelist_pthread_spinlock_process_shared_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION, bss->ts, libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_init, libbenchmark_benchmark_freelist_windows_critical_section_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_FREELIST, LIBBENCHMARK_BENCHMARK_ID_PUSH1_THEN_POP1, LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX, bss->ts, libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_init, libbenchmark_benchmark_freelist_windows_mutex_push1_pop1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_freelist, bs );
+ }
+ }
+
+ // TRD : queue_umm
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ // TRD : queue_umm set
+ bsets_queue_umm = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkset_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkset_init( bsets_queue_umm, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, &bss->lpsets, &bss->numa_modes_list, bss->ts, ms );
+ libbenchmark_benchmarksuite_add_benchmarkset( bss, bsets_queue_umm );
+
+ if( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_ATOMIC, bss->ts, libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_gcc_spinlock_atomic_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_GCC_SPINLOCK_SYNC, bss->ts, libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_gcc_spinlock_sync_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_LIBLFDS700_LOCKFREE, bss->ts, libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_liblfds700_lockfree_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_LIBLFDS710_LOCKFREE, bss->ts, libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_liblfds710_lockfree_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+
+ if( LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_MSVC_SPINLOCK, bss->ts, libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_msvc_spinlock_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_PTHREAD_MUTEX, bss->ts, libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_pthread_mutex_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_PTHREAD_RWLOCK, bss->ts, libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_pthread_rwlock_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_PRIVATE, bss->ts, libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_pthread_spinlock_process_private_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_PTHREAD_SPINLOCK_PROCESS_SHARED, bss->ts, libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_pthread_spinlock_process_shared_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_WINDOWS_CRITICAL_SECTION, bss->ts, libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_windows_critical_section_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+
+ if( LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX )
+ {
+ bs = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_benchmarkinstance_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ libbenchmark_benchmarkinstance_init( bs, LIBBENCHMARK_DATASTRUCTURE_ID_QUEUE_UMM, LIBBENCHMARK_BENCHMARK_ID_ENQUEUE_UMM1_THEN_DEQUEUE_UMM1, LIBBENCHMARK_LOCK_ID_WINDOWS_MUTEX, bss->ts, libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_init, libbenchmark_benchmark_queue_umm_windows_mutex_enqueue1_dequeue1_cleanup );
+ libbenchmark_benchmarkset_add_benchmark( bsets_queue_umm, bs );
+ }
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_benchmarksuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_benchmarksuite_run( struct libbenchmark_benchmarksuite_state *bss, struct libbenchmark_results_state *rs )
+{
+ char
+ *topology_string;
+
+ struct libbenchmark_benchmarkset_state
+ *bsets;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ LFDS710_PAL_ASSERT( bss != NULL );
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ topology_string = libbenchmark_topology_generate_string( bss->ts, bss->ms, LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_STDOUT );
+
+ libbenchmark_pal_print_string( topology_string );
+
+ // TRD : iterate over all benchmarksets
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(bss->benchmarksets,lasue) )
+ {
+ bsets = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ libbenchmark_benchmarkset_run( bsets, rs );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_init( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_cleanup( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert_result libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element *baue,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_OVERWRITE:
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+ // TRD : we took the lock above, so release it before the early return
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_FAIL:
+ // TRD : we took the lock above, so release it before the early return
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+ baus->root = baue;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( baus->lock );
+
+ // TRD : if we get to here, we added (not failed or overwrite on exist) a new element
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_INSERT_RESULT_SUCCESS;
+}
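+
+ /* TRD : a minimal usage sketch (illustrative only - my_key_compare_function is a user-supplied comparator,
+ and the key is written into the element directly here purely for brevity);
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state
+ baus;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ baue;
+
+ lfds710_pal_uint_t
+ key = 5;
+
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_init( &baus, my_key_compare_function, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_EXISTING_KEY_FAIL, NULL );
+ baue.key = &key;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_insert( &baus, &baue, NULL );
+ */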
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_key( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue, enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue, enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_ATOMIC_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *left,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is;
+ 1. if we have a left, it's the largest element on the right branch of our left child
+ 2. if we don't have a left, and we're on the right of our parent, then it's our parent
+ 3. if we don't have a left, and we're on the left of our parent or we have no parent,
+ iterate up the tree until we find the first child who is on the right of its parent; then it's the parent
+ */
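+
+ /* TRD : worked example, on an illustrative tree holding keys 1 to 7, with 4 at the root, 2 and 6 as its children, and 1, 3, 5 and 7 as leaves;
+ from 4, rule 1 applies and we take the largest element under our left child, giving 3
+ from 3, rule 2 applies (we are the right child of our parent) and we take our parent, giving 2
+ from 5, rule 3 applies; we step up to 6 (we are its left child), stop there because 6 is the right child of 4, and take 4
+ */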
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
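+  /* TRD : mirror image of the next-smallest walk : from any given element, the next largest element is:
+           1. if we have a right child, it's the smallest element under our right child
+           2. if we don't have a right child and we're the left child of our parent, it's our parent
+           3. otherwise, iterate up the tree until we find an ancestor which is the left child of its parent;
+              that parent is the next largest (NULL if no such ancestor exists)
+  */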
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_element **baue, enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : absolute_position can be any value in its range
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_gcc_spinlock_atomic_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_gcc_spinlock_sync_init( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_gcc_spinlock_sync_cleanup( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+  // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_ROOT );
+
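+    /* TRD : destruction strategy : starting from the root, any element with fewer than
+             two children is unlinked (splicing its single child, if any, into its place)
+             and handed to the callback; an element with two children is not deleted yet,
+             we simply step left and try again - every iteration either removes an element
+             or descends, so the loop terminates with every element cleaned up exactly once
+    */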
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+          // TRD : if we have a parent (we could be the root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert_result libbenchmark_datastructure_btree_au_gcc_spinlock_sync_insert( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element *baue,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
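+  /* TRD : plain unbalanced BST insert under the lock : walk down from the root, left when
+           the new key compares smaller, right when it compares larger, until we run off the
+           tree, then link the new element beneath the last element visited; an equal key
+           never reaches the link-in code, it is dealt with inside the loop via
+           baus->existing_key (overwrite or fail)
+  */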
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+        case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_OVERWRITE:
+          LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+          // TRD : release the lock before the early return, or the next caller would spin forever
+          LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( baus->lock );
+          return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_SUCCESS_OVERWRITE;
+        break;
+
+        case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_EXISTING_KEY_FAIL:
+          LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( baus->lock );
+          return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+    baus->root = baue;
+  }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( baus->lock );
+
+  // TRD : if we get here, we linked in a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_key( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
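+  /* TRD : standard BST search under the lock; the caller-supplied compare function, when
+           given, overrides the one set at init, which lets a caller search using a
+           different comparison than the tree was built with
+  */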
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue, enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue, enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( baus->lock );
+
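+  /* TRD : UP, LEFT and RIGHT are single structural steps; the SMALLEST/LARGEST_ELEMENT_BELOW
+           positions give the closest smaller/larger key drawn from beneath the current
+           element, and the NEXT_SMALLER/NEXT_LARGER positions advance one step through an
+           in-order walk of the whole tree - in every case *baue is set to NULL (and 0
+           returned) when there is nowhere to go
+  */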
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+  /* TRD : from any given element, the next smallest element is:
+           1. if we have a left child, it's the largest element on the right branch of our left child
+           2. if we don't have a left child and we're the right child of our parent, it's our parent
+           3. if we don't have a left child and we're the left child of our parent, or we have no parent,
+              iterate up the tree until we find the first ancestor which is the right child of its parent; that parent is the next smallest
+  */
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_state *baus, struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element **baue, enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_gcc_spinlock_sync_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : absolute_position can be any value in its range
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
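+
+/* TRD : this is intended as a one-call iterator : set *baue to NULL and call repeatedly
+         and it will first seek to the absolute position, then keep stepping by the
+         relative position, returning 0 once the walk is exhausted
+
+         a minimal sketch of an ascending-order walk (the caller and process() are
+         hypothetical, not part of the library) :
+
+           struct libbenchmark_datastructure_btree_au_gcc_spinlock_sync_element
+             *baue = NULL;
+
+           while( libbenchmark_datastructure_btree_au_gcc_spinlock_sync_get_by_absolute_position_and_then_by_relative_position(
+                    &baus, &baue,
+                    LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+                    LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_GCC_SPINLOCK_SYNC_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE ) )
+             process( baue );
+*/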
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** enums *****/
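+// TRD : shared across all the per-lock btree_au implementations; the move enum drives the
+//       in-order walk helpers, the delete_action enum drives the cleanup loops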
+enum libbenchmark_datastructure_btree_au_move
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE
+};
+
+enum libbenchmark_datastructure_btree_au_delete_action
+{
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT
+};
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_msvc_spinlock_init( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_msvc_spinlock_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_msvc_spinlock_cleanup( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+  // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+          // TRD : if we have a parent (we could be the root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_msvc_spinlock_insert_result libbenchmark_datastructure_btree_au_msvc_spinlock_insert( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element *baue,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+        case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_OVERWRITE:
+          LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+          // TRD : release the lock before the early return, or the next caller would spin forever
+          LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( baus->lock );
+          return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_SUCCESS_OVERWRITE;
+        break;
+
+        case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_EXISTING_KEY_FAIL:
+          LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( baus->lock );
+          return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+    baus->root = baue;
+  }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( baus->lock );
+
+  // TRD : if we get here, we linked in a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_key( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue, enum libbenchmark_datastructure_btree_au_msvc_spinlock_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue, enum libbenchmark_datastructure_btree_au_msvc_spinlock_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MSVC_SPINLOCK_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+  /* TRD : from any given element, the next smallest element is:
+           1. if we have a left child, it's the largest element on the right branch of our left child
+           2. if we don't have a left child and we're the right child of our parent, it's our parent
+           3. if we don't have a left child and we're the left child of our parent, or we have no parent,
+              iterate up the tree until we find the first ancestor which is the right child of its parent; that parent is the next smallest
+  */
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_msvc_spinlock_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_msvc_spinlock_state *baus, struct libbenchmark_datastructure_btree_au_msvc_spinlock_element **baue, enum libbenchmark_datastructure_btree_au_msvc_spinlock_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_msvc_spinlock_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : absolute_position can be any value in its range
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_msvc_spinlock_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_mutex_init( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_mutex_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
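+  /* TRD : the store barrier and forced store flush the init writes out to memory, so the
+           state structure can be handed to other threads/cores and be seen fully
+           initialised on its first use
+  */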
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_mutex_cleanup( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+  // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+          // TRD : if we have a parent (we could be the root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_pthread_mutex_insert_result libbenchmark_datastructure_btree_au_pthread_mutex_insert( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+        case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_OVERWRITE:
+          LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+          // TRD : release the mutex before the early return, or the next caller would block forever
+          LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( baus->lock );
+          return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_SUCCESS_OVERWRITE;
+        break;
+
+        case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_EXISTING_KEY_FAIL:
+          LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( baus->lock );
+          return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+    baus->root = baue;
+  }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( baus->lock );
+
+  // TRD : if we get here, we linked in a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_key( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue, enum libbenchmark_datastructure_btree_au_pthread_mutex_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue, enum libbenchmark_datastructure_btree_au_pthread_mutex_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_MUTEX_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+  /* TRD : from any given element, the next smallest element is:
+           1. if we have a left child, it's the largest element on the right branch of our left child
+           2. if we don't have a left child and we're the right child of our parent, it's our parent
+           3. if we don't have a left child and we're the left child of our parent, or we have no parent,
+              iterate up the tree until we find the first ancestor which is the right child of its parent; that parent is the next smallest
+  */
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_mutex_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_mutex_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_mutex_state *baus, struct libbenchmark_datastructure_btree_au_pthread_mutex_element **baue, enum libbenchmark_datastructure_btree_au_pthread_mutex_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_pthread_mutex_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+  // TRD : absolute_position can be any value in its range
+  // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_pthread_mutex_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_pthread_mutex_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_rwlock_init( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_rwlock_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_rwlock_cleanup( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_for_read( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be the root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_pthread_rwlock_insert_result libbenchmark_datastructure_btree_au_pthread_rwlock_insert( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_OVERWRITE:
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+ // TRD : release the write lock before the early return
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_EXISTING_KEY_FAIL:
+ // TRD : release the write lock before the early return
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+ baus->root = baue;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+
+ // TRD : if we get to here, we added a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_READ( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_key_for_write( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue, enum libbenchmark_datastructure_btree_au_pthread_rwlock_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_READ( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue, enum libbenchmark_datastructure_btree_au_pthread_rwlock_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_READ( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is:
+ 1. if we have a left child, it's the largest element in our left subtree
+ (follow our left child's chain of right children to its end)
+ 2. if we don't have a left child, and we're the right child of our parent, then it's our parent
+ 3. if we don't have a left child, and we're the left child of our parent or we have no parent,
+ iterate up the tree until we find the first element which is the right child of its parent;
+ the next smallest is then that element's parent (NULL if we reach the root without finding one)
+ */
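+
+ /* TRD : for example (purely illustrative), take the tree whose root is 4, where 4's left
+ child is 2, 4's right child is 6, and 2's children are 1 (left) and 3 (right);
+ rule 1 : the next smallest after 4 is 3 (the largest element in 4's left subtree)
+ rule 2 : the next smallest after 3 is 2 (3 has no left child and is the right child of 2)
+ rule 3 : the next smallest after 1 is NULL (1 and then 2 are left children, and 4 has no parent)
+ */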
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
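+ /* TRD : from any given element, the next largest element is:
+ 1. if we have a right child, it's the smallest element in our right subtree
+ (follow our right child's chain of left children to its end)
+ 2. if we don't have a right child, and we're the left child of our parent, then it's our parent
+ 3. if we don't have a right child, and we're the right child of our parent or we have no parent,
+ iterate up the tree until we find the first element which is the left child of its parent;
+ the next largest is then that element's parent (NULL if we reach the root without finding one)
+ */
+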
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_rwlock_state *baus, struct libbenchmark_datastructure_btree_au_pthread_rwlock_element **baue, enum libbenchmark_datastructure_btree_au_pthread_rwlock_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_pthread_rwlock_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_for_read( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_relative_position_for_read( baus, baue, relative_position );
+
+ return rv;
+}
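+
+ /* TRD : usage sketch - this absolute-and-then-relative variant lets a caller walk the whole
+ tree in key order with a single loop; a minimal sketch, assuming a state variable
+ "baus" which has already been init'd and populated, and a caller-supplied
+ process_element() function which is not part of this file, would be
+
+ struct libbenchmark_datastructure_btree_au_pthread_rwlock_element
+ *baue = NULL;
+
+ while( libbenchmark_datastructure_btree_au_pthread_rwlock_get_by_absolute_position_and_then_by_relative_position( &baus, &baue,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_ABSOLUTE_POSITION_SMALLEST_IN_TREE,
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_RWLOCK_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ process_element( baue );
+
+ the first call sees *baue == NULL and so fetches the smallest element in the tree; each
+ later call sees a non-NULL *baue and fetches the next larger element; the loop ends when
+ the function returns 0, which happens when *baue goes NULL past the largest element
+ */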
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be the root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert_result libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_insert( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_OVERWRITE:
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+ // TRD : release the lock before the early return
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_EXISTING_KEY_FAIL:
+ // TRD : release the lock before the early return
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+ baus->root = baue;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( baus->lock );
+
+ // TRD : if we get to here, we added a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_key( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is:
+ 1. if we have a left child, it's the largest element in our left subtree
+ (follow our left child's chain of right children to its end)
+ 2. if we don't have a left child, and we're the right child of our parent, then it's our parent
+ 3. if we don't have a left child, and we're the left child of our parent or we have no parent,
+ iterate up the tree until we find the first element which is the right child of its parent;
+ the next smallest is then that element's parent (NULL if we reach the root without finding one)
+ */
+
+ // TRD : the lock is already held by our caller (get_by_relative_position), so it must not be taken again here
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
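+ /* TRD : from any given element, the next largest element is:
+ 1. if we have a right child, it's the smallest element in our right subtree
+ (follow our right child's chain of left children to its end)
+ 2. if we don't have a right child, and we're the left child of our parent, then it's our parent
+ 3. if we don't have a right child, and we're the right child of our parent or we have no parent,
+ iterate up the tree until we find the first element which is the left child of its parent;
+ the next largest is then that element's parent (NULL if we reach the root without finding one)
+ */
+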
+ // TRD : the lock is already held by our caller (get_by_relative_position), so it must not be taken again here
+
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_element **baue, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_pthread_spinlock_process_private_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_init( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_cleanup( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_ROOT );
+
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be the root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert_result libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_insert( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element *baue,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_OVERWRITE:
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+ // TRD : release the lock before the early return
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_EXISTING_KEY_FAIL:
+ // TRD : release the lock before the early return
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+ baus->root = baue;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( baus->lock );
+
+ // TRD : if we get to here, we added a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_key( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_PTHREAD_SPINLOCK_PROCESS_SHARED_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is:
+ 1. if we have a left child, it's the largest element in our left subtree
+ (follow our left child's chain of right children to its end)
+ 2. if we don't have a left child, and we're the right child of our parent, then it's our parent
+ 3. if we don't have a left child, and we're the left child of our parent or we have no parent,
+ iterate up the tree until we find the first element which is the right child of its parent;
+ the next smallest is then that element's parent (NULL if we reach the root without finding one)
+ */
+
+ // TRD : the lock is already held by our caller (get_by_relative_position), so it must not be taken again here
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
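+ /* TRD : from any given element, the next largest element is:
+ 1. if we have a right child, it's the smallest element in our right subtree
+ (follow our right child's chain of left children to its end)
+ 2. if we don't have a right child, and we're the left child of our parent, then it's our parent
+ 3. if we don't have a right child, and we're the right child of our parent or we have no parent,
+ iterate up the tree until we find the first element which is the left child of its parent;
+ the next largest is then that element's parent (NULL if we reach the root without finding one)
+ */
+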
+ // TRD : the lock is already held by our caller (get_by_relative_position), so it must not be taken again here
+
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_state *baus, struct libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_element **baue, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_pthread_spinlock_process_shared_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_windows_critical_section_init( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_windows_critical_section_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_windows_critical_section_cleanup( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_ROOT );
+
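+ /* TRD : destructively walk the tree, starting from the root;
+ an element with no children is unlinked from its parent and passed to the callback,
+ an element with one child is replaced in its parent by that child and passed to the callback,
+ an element with two children cannot yet be deleted, so we move to its left child and continue
+ */
+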
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_windows_critical_section_insert_result libbenchmark_datastructure_btree_au_windows_critical_section_insert( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element *baue,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
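+ // TRD : descend from the root, comparing keys, until we find an equal key or the empty position where the new element belongs
+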
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_OVERWRITE:
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_EXISTING_KEY_FAIL:
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+ baus->root = baue;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( baus->lock );
+
+ // TRD : if we get to here, we added a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_key( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue, enum libbenchmark_datastructure_btree_au_windows_critical_section_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue, enum libbenchmark_datastructure_btree_au_windows_critical_section_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_CRITICAL_SECTION_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is;
+ 1. if we have a left, it's the largest element on the right branch of our left child
+ 2. if we don't have a left, and we're on the right of our parent, then it's our parent
+ 3. if we don't have a left, and we're on the left of our parent or we have no parent,
+ iterate up the tree until we find the first child which is on the right of its parent; then it's the parent
+ */
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_windows_critical_section_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
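+ /* TRD : from any given element, the next largest element is;
+ 1. if we have a right, it's the smallest element on the left branch of our right child
+ 2. if we don't have a right, and we're on the left of our parent, then it's our parent
+ 3. if we don't have a right, and we're on the right of our parent or we have no parent,
+ iterate up the tree until we find the first child which is on the left of its parent; then it's the parent
+ */
+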
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_critical_section_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_critical_section_state *baus, struct libbenchmark_datastructure_btree_au_windows_critical_section_element **baue, enum libbenchmark_datastructure_btree_au_windows_critical_section_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_windows_critical_section_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_windows_critical_section_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_windows_critical_section_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_btree_au_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue );
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_windows_mutex_init( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ enum libbenchmark_datastructure_btree_au_windows_mutex_existing_key existing_key,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( key_compare_function != NULL );
+ // TRD : existing_key can be any value in its range
+ // TRD : user_state can be NULL
+
+ baus->root = NULL;
+ baus->key_compare_function = key_compare_function;
+ baus->existing_key = existing_key;
+ baus->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_CREATE( baus->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_btree_au_windows_mutex_cleanup( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element *baue) )
+{
+ enum libbenchmark_datastructure_btree_au_delete_action
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF; // TRD : to remove compiler warning
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *baue,
+ *temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ if( element_cleanup_callback != NULL )
+ {
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_absolute_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_ROOT );
+
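+ /* TRD : destructively walk the tree, starting from the root;
+ an element with no children is unlinked from its parent and passed to the callback,
+ an element with one child is replaced in its parent by that child and passed to the callback,
+ an element with two children cannot yet be deleted, so we move to its left child and continue
+ */
+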
+ while( baue != NULL )
+ {
+ if( baue->left == NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF;
+
+ if( baue->left != NULL and baue->right == NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD;
+
+ if( baue->left == NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD;
+
+ if( baue->left != NULL and baue->right != NULL )
+ delete_action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT;
+
+ switch( delete_action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF:
+ // TRD : if we have a parent (we could be root) set its pointer to us to NULL
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = NULL;
+ if( baue->up->right == baue )
+ baue->up->right = NULL;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_UP );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_LEFT_CHILD:
+ baue->left->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->left;
+ if( baue->up->right == baue )
+ baue->up->right = baue->left;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_LEFT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_SELF_REPLACE_WITH_RIGHT_CHILD:
+ baue->right->up = baue->up;
+ if( baue->up != NULL )
+ {
+ if( baue->up->left == baue )
+ baue->up->left = baue->right;
+ if( baue->up->right == baue )
+ baue->up->right = baue->right;
+ }
+
+ temp = baue;
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_RIGHT );
+ element_cleanup_callback( baus, temp );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_DELETE_MOVE_LEFT:
+ libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( baus, &baue, LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_LEFT );
+ break;
+ }
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_DESTROY( baus->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+enum libbenchmark_datastructure_btree_au_windows_mutex_insert_result libbenchmark_datastructure_btree_au_windows_mutex_insert( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element *baue,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **existing_baue )
+{
+ int
+ compare_result = 0;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *baue_next = NULL,
+ *baue_parent = NULL,
+ *baue_temp;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : existing_baue can be NULL
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( baus->lock );
+
+ baue->up = baue->left = baue->right = NULL;
+
+ baue_temp = baus->root;
+
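+ // TRD : descend from the root, comparing keys, until we find an equal key or the empty position where the new element belongs
+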
+ while( baue_temp != NULL )
+ {
+ compare_result = baus->key_compare_function( baue->key, baue_temp->key );
+
+ if( compare_result == 0 )
+ {
+ if( existing_baue != NULL )
+ *existing_baue = baue_temp;
+
+ switch( baus->existing_key )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_OVERWRITE:
+ LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_SET_VALUE_IN_ELEMENT( *baus, *baue_temp, baue->value );
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_SUCCESS_OVERWRITE;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_EXISTING_KEY_FAIL:
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( baus->lock );
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_FAILURE_EXISTING_KEY;
+ break;
+ }
+ }
+
+ if( compare_result < 0 )
+ baue_next = baue_temp->left;
+
+ if( compare_result > 0 )
+ baue_next = baue_temp->right;
+
+ baue_parent = baue_temp;
+ baue_temp = baue_next;
+ }
+
+ if( baue_parent == NULL )
+ {
+ baue->up = baus->root;
+ baus->root = baue;
+ }
+
+ if( baue_parent != NULL )
+ {
+ if( compare_result <= 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->left = baue;
+ }
+
+ if( compare_result > 0 )
+ {
+ baue->up = baue_parent;
+ baue_parent->right = baue;
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( baus->lock );
+
+ // TRD : if we get to here, we added a new element (we did not fail on, or overwrite, an existing key)
+ if( existing_baue != NULL )
+ *existing_baue = NULL;
+
+ return LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_INSERT_RESULT_SUCCESS;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_key( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus,
+ int (*key_compare_function)(void const *new_key, void const *existing_key),
+ void *key,
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue )
+{
+ int
+ compare_result = !0,
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ // TRD : key_compare_function can be NULL
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ if( key_compare_function == NULL )
+ key_compare_function = baus->key_compare_function;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( baus->lock );
+
+ *baue = baus->root;
+
+ while( *baue != NULL and compare_result != 0 )
+ {
+ compare_result = key_compare_function( key, (*baue)->key );
+
+ if( compare_result < 0 )
+ *baue = (*baue)->left;
+
+ if( compare_result > 0 )
+ *baue = (*baue)->right;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_absolute_position( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue, enum libbenchmark_datastructure_btree_au_windows_mutex_absolute_position absolute_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( baus->lock );
+
+ *baue = baus->root;
+
+ switch( absolute_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_ROOT:
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_LARGEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_ABSOLUTE_POSITION_SMALLEST_IN_TREE:
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue, enum libbenchmark_datastructure_btree_au_windows_mutex_relative_position relative_position )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ return 0;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( baus->lock );
+
+ switch( relative_position )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_UP:
+ *baue = (*baue)->up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_LEFT:
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_RIGHT:
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_SMALLEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_LARGEST_ELEMENT_BELOW_CURRENT_ELEMENT:
+ *baue = (*baue)->right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( baus, baue );
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_WINDOWS_MUTEX_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE:
+ libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( baus, baue );
+ break;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( baus->lock );
+
+ if( *baue == NULL )
+ rv = 0;
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_largest_get_next_smallest_element( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *left = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ /* TRD : from any given element, the next smallest element is;
+ 1. if we have a left, it's the largest element on the right branch of our left child
+ 2. if we don't have a left, and we're on the right of our parent, then it's our parent
+ 3. if we don't have a left, and we're on the left of our parent or we have no parent,
+ iterate up the tree until we find the first child which is on the right of its parent; then it's the parent
+ */
+
+ left = (*baue)->left;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( left != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD;
+
+ if( left == NULL and up != NULL and up_right == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (left == NULL and up == NULL) or (up != NULL and up_left == *baue and left == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ // TRD : eliminates a compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ *baue = left;
+ if( *baue != NULL )
+ while( (*baue)->right != NULL )
+ *baue = (*baue)->right;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_left = (*baue)->up->left;
+
+ if( *baue != NULL and up != NULL and *baue == up_left )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void libbenchmark_datastructure_btree_au_internal_inorder_walk_from_smallest_get_next_largest_element( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue )
+{
+ enum libbenchmark_datastructure_btree_au_move
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID;
+
+ enum flag
+ finished_flag = LOWERED;
+
+ struct libbenchmark_datastructure_btree_au_windows_mutex_element
+ *right = NULL,
+ *up = NULL,
+ *up_left = NULL,
+ *up_right = NULL;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
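+ /* TRD : from any given element, the next largest element is;
+ 1. if we have a right, it's the smallest element on the left branch of our right child
+ 2. if we don't have a right, and we're on the left of our parent, then it's our parent
+ 3. if we don't have a right, and we're on the right of our parent or we have no parent,
+ iterate up the tree until we find the first child which is on the left of its parent; then it's the parent
+ */
+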
+ right = (*baue)->right;
+ up = (*baue)->up;
+ if( up != NULL )
+ {
+ up_left = (*baue)->up->left;
+ up_right = (*baue)->up->right;
+ }
+
+ if( right != NULL )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD;
+
+ if( right == NULL and up != NULL and up_left == *baue )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT;
+
+ if( (right == NULL and up == NULL) or (up != NULL and up_right == *baue and right == NULL) )
+ action = LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE;
+
+ switch( action )
+ {
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_INVALID:
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_LARGEST_FROM_LEFT_CHILD:
+ // TRD : remove compiler warning
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_SMALLEST_FROM_RIGHT_CHILD:
+ *baue = right;
+ if( *baue != NULL )
+ while( (*baue)->left != NULL )
+ *baue = (*baue)->left;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_GET_PARENT:
+ *baue = up;
+ break;
+
+ case LIBBENCHMARK_DATASTRUCTURE_BTREE_AU_MOVE_MOVE_UP_TREE:
+ while( finished_flag == LOWERED )
+ {
+ up = (*baue)->up;
+ if( up != NULL )
+ up_right = (*baue)->up->right;
+
+ if( *baue != NULL and up != NULL and *baue == up_right )
+ *baue = up;
+ else
+ finished_flag = RAISED;
+ }
+
+ *baue = up;
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_btree_au_windows_mutex_get_by_absolute_position_and_then_by_relative_position( struct libbenchmark_datastructure_btree_au_windows_mutex_state *baus, struct libbenchmark_datastructure_btree_au_windows_mutex_element **baue, enum libbenchmark_datastructure_btree_au_windows_mutex_absolute_position absolute_position, enum libbenchmark_datastructure_btree_au_windows_mutex_relative_position relative_position )
+{
+ int
+ rv;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+ // TRD : absolute_position can be any value in its range
+ // TRD : relative_position can be any value in its range
+
+ if( *baue == NULL )
+ rv = libbenchmark_datastructure_btree_au_windows_mutex_get_by_absolute_position( baus, baue, absolute_position );
+ else
+ rv = libbenchmark_datastructure_btree_au_windows_mutex_get_by_relative_position( baus, baue, relative_position );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_gcc_spinlock_atomic_init( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_gcc_spinlock_atomic_cleanup( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_gcc_spinlock_atomic_push( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( fs->lock );
+
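+ // TRD : the freelist is a singly linked stack; the pushed element becomes the new top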
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_gcc_spinlock_atomic_pop( struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_atomic_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( fs->lock );
+
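+ // TRD : take the current top element; rv is 0 if the freelist is empty, 1 otherwise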
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_gcc_spinlock_sync_init( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_gcc_spinlock_sync_cleanup( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_gcc_spinlock_sync_push( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_gcc_spinlock_sync_pop( struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_state *fs, struct libbenchmark_datastructure_freelist_gcc_spinlock_sync_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_msvc_spinlock_init( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_msvc_spinlock_cleanup( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, struct libbenchmark_datastructure_freelist_msvc_spinlock_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_msvc_spinlock_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_msvc_spinlock_push( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, struct libbenchmark_datastructure_freelist_msvc_spinlock_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_msvc_spinlock_pop( struct libbenchmark_datastructure_freelist_msvc_spinlock_state *fs, struct libbenchmark_datastructure_freelist_msvc_spinlock_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_mutex_init( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_mutex_cleanup( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, struct libbenchmark_datastructure_freelist_pthread_mutex_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_pthread_mutex_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_mutex_push( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, struct libbenchmark_datastructure_freelist_pthread_mutex_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_pthread_mutex_pop( struct libbenchmark_datastructure_freelist_pthread_mutex_state *fs, struct libbenchmark_datastructure_freelist_pthread_mutex_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_rwlock_init( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_rwlock_cleanup( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, struct libbenchmark_datastructure_freelist_pthread_rwlock_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_pthread_rwlock_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_rwlock_push( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, struct libbenchmark_datastructure_freelist_pthread_rwlock_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_pthread_rwlock_pop( struct libbenchmark_datastructure_freelist_pthread_rwlock_state *fs, struct libbenchmark_datastructure_freelist_pthread_rwlock_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_private_push( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_pthread_spinlock_process_private_pop( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_private_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_init( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_cleanup( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_push( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_pop( struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_state *fs, struct libbenchmark_datastructure_freelist_pthread_spinlock_process_shared_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_windows_critical_section_init( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_windows_critical_section_cleanup( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, struct libbenchmark_datastructure_freelist_windows_critical_section_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_windows_critical_section_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_windows_critical_section_push( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, struct libbenchmark_datastructure_freelist_windows_critical_section_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_windows_critical_section_pop( struct libbenchmark_datastructure_freelist_windows_critical_section_state *fs, struct libbenchmark_datastructure_freelist_windows_critical_section_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_freelist_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_windows_mutex_init( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, void *user_state )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ fs->top = NULL;
+ fs->user_state = user_state;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_CREATE( fs->lock );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_windows_mutex_cleanup( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, void (*element_pop_callback)(struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, struct libbenchmark_datastructure_freelist_windows_mutex_element *fe, void *user_state) )
+{
+ struct libbenchmark_datastructure_freelist_windows_mutex_element
+ *fe,
+ *fe_temp;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ // TRD : element_pop_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_pop_callback != NULL )
+ {
+ fe = fs->top;
+
+ while( fe != NULL )
+ {
+ fe_temp = fe;
+ fe = fe->next;
+
+ element_pop_callback( fs, fe_temp, (void *) fs->user_state );
+ }
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_DESTROY( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_freelist_windows_mutex_push( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, struct libbenchmark_datastructure_freelist_windows_mutex_element *fe )
+{
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( fs->lock );
+
+ fe->next = fs->top;
+ fs->top = fe;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( fs->lock );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+int libbenchmark_datastructure_freelist_windows_mutex_pop( struct libbenchmark_datastructure_freelist_windows_mutex_state *fs, struct libbenchmark_datastructure_freelist_windows_mutex_element **fe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( fs != NULL );
+ LFDS710_PAL_ASSERT( fe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( fs->lock );
+
+ *fe = fs->top;
+
+ if( fs->top != NULL )
+ fs->top = fs->top->next;
+ else
+ rv = 0;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( fs->lock );
+
+ return rv;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_init( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
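+ /* TRD : the caller-supplied element becomes the queue's permanent dummy element
+ enqueue_umm and dequeue_umm both start by pointing at it, and separate enqueue
+ and dequeue locks are created so enqueuers and dequeuers take different locks
+ */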
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_cleanup( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_enqueue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_dequeue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_atomic_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_GET( qs->lock_dequeue_umm );
+
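+ /* TRD : two-lock queue with a dummy element; dequeue_umm always points at the dummy
+ when the queue is non-empty, the caller receives the old dummy, carrying the key
+ and value copied from its successor, and that successor becomes the new dummy
+ */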
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_ATOMIC_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_init( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_cleanup( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_enqueue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_dequeue_umm( struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_state *qs, struct libbenchmark_datastructure_queue_umm_gcc_spinlock_sync_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_GCC_SPINLOCK_SYNC_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_msvc_spinlock_init( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_msvc_spinlock_cleanup( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_msvc_spinlock_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_msvc_spinlock_enqueue_umm( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_msvc_spinlock_dequeue_umm( struct libbenchmark_datastructure_queue_umm_msvc_spinlock_state *qs, struct libbenchmark_datastructure_queue_umm_msvc_spinlock_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_MSVC_SPINLOCK_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_mutex_init( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_mutex_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_mutex_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_pthread_mutex_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_mutex_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_pthread_mutex_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_mutex_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_MUTEX_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_rwlock_init( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_rwlock_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_pthread_rwlock_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_rwlock_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_pthread_rwlock_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_rwlock_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_rwlock_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_GET_WRITE( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_RWLOCK_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_init( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_private_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_PRIVATE_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_init( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_cleanup( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_enqueue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_dequeue_umm( struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_state *qs, struct libbenchmark_datastructure_queue_umm_pthread_spinlock_process_shared_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_PTHREAD_SPINLOCK_PROCESS_SHARED_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_windows_critical_section_init( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_windows_critical_section_cleanup( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_windows_critical_section_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_windows_critical_section_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_windows_critical_section_enqueue_umm( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_windows_critical_section_dequeue_umm( struct libbenchmark_datastructure_queue_umm_windows_critical_section_state *qs, struct libbenchmark_datastructure_queue_umm_windows_critical_section_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_CRITICAL_SECTION_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_datastructure_queue_umm_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_windows_mutex_init( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element *qe, void *user_state )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+ LFDS710_PAL_ASSERT( user_state == NULL );
+
+ qe->next = NULL;
+
+ qs->enqueue_umm = qe;
+ qs->dequeue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_CREATE( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_CREATE( qs->lock_dequeue_umm );
+
+ qs->user_state = user_state;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_windows_mutex_cleanup( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs,
+ void (*element_cleanup_callback)(struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element *qe, enum flag dummy_element_flag) )
+{
+ struct libbenchmark_datastructure_queue_umm_windows_mutex_element
+ *qe;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( element_cleanup_callback != NULL )
+ while( libbenchmark_datastructure_queue_umm_windows_mutex_dequeue_umm(qs, &qe) )
+ element_cleanup_callback( qs, qe, LOWERED );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_DESTROY( qs->lock_enqueue_umm );
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_DESTROY( qs->lock_dequeue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_datastructure_queue_umm_windows_mutex_enqueue_umm( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element *qe )
+{
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ qe->next = NULL;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( qs->lock_enqueue_umm );
+
+ qs->enqueue_umm->next = qe;
+ qs->enqueue_umm = qe;
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( qs->lock_enqueue_umm );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_datastructure_queue_umm_windows_mutex_dequeue_umm( struct libbenchmark_datastructure_queue_umm_windows_mutex_state *qs, struct libbenchmark_datastructure_queue_umm_windows_mutex_element **qe )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( qs != NULL );
+ LFDS710_PAL_ASSERT( qe != NULL );
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_GET( qs->lock_dequeue_umm );
+
+ if( qs->dequeue_umm == qs->enqueue_umm )
+ {
+ *qe = NULL;
+ rv = 0;
+ }
+ else
+ {
+ *qe = qs->dequeue_umm;
+ (*qe)->key = qs->dequeue_umm->next->key;
+ (*qe)->value = qs->dequeue_umm->next->value;
+ qs->dequeue_umm = qs->dequeue_umm->next;
+ }
+
+ LIBBENCHMARK_PAL_LOCK_WINDOWS_MUTEX_RELEASE( qs->lock_dequeue_umm );
+
+ return rv;
+}
+
--- /dev/null
+/***** public prototypes *****/
+#include "../inc/libbenchmark.h"
+
+/***** defines *****/
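+// TRD : readability defines for the logical operators, matching the iso646.h alternative spellings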
+#define and &&
+#define or ||
+
+#define NO_FLAGS 0x0
+
+#define NUMBER_UPPERCASE_LETTERS_IN_LATIN_ALPHABET 26
+#define NUMBER_OF_NANOSECONDS_IN_ONE_SECOND 1000000000LLU
+#define TIME_LOOP_COUNT 1000
+#define DEFAULT_BENCHMARK_DURATION_IN_SECONDS 5
+
+#define ONE_KILOBYTES_IN_BYTES 1024
+
+#define LIBBENCHMARK_VERSION_STRING "7.1.0"
+#define LIBBENCHMARK_VERSION_INTEGER 710
+
+#define RETURN_SUCCESS 0
+#define RETURN_FAILURE 1
+
+#if( defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && defined PROF )
+ #define BUILD_TYPE_STRING "profiling"
+#endif
+
+/***** library-wide prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+char const
+ * const libbenchmark_globals_datastructure_names[] =
+ {
+ "btree_au",
+ "freelist",
+ "queue_umm"
+ },
+ * const libbenchmark_globals_benchmark_names[] =
+ {
+ "readn_then_writen",
+ "push1_then_pop1",
+ "enqueue1_then_dequeue1"
+ },
+ * const libbenchmark_globals_lock_names[] =
+ {
+ "GCC spinlock (atomic)",
+ "GCC spinlock (sync)",
+ "liblfds700 (lock-free)",
+ "liblfds710 (lock-free)",
+ "MSVC spinlock",
+ "pthread mutex",
+ "pthread rwlock",
+ "pthread spinlock (private)",
+ "pthread spinlock (shared)",
+ "windows critical section",
+ "windows mutex"
+ },
+ * const libbenchmark_globals_numa_mode_names[] =
+ {
+ "smp",
+ "numa",
+ "numa_unused"
+ };
+
+lfds710_pal_uint_t
+ libbenchmark_globals_benchmark_duration_in_seconds = DEFAULT_BENCHMARK_DURATION_IN_SECONDS;
+
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_new_topology_node( struct libbenchmark_topology_node_state **tns,
+ struct libshared_memory_state *ms )
+{
+ LFDS710_PAL_ASSERT( tns != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ *tns = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libbenchmark_topology_node_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ libbenchmark_topology_node_init( *tns );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( struct libbenchmark_topology_node_state *tns,
+ struct libshared_memory_state *ms,
+ lfds710_pal_uint_t logical_processor_number,
+ enum flag windows_processor_group_inuse_flag,
+ lfds710_pal_uint_t windows_processor_group_number )
+{
+ struct libbenchmark_topology_node_state
+ *tns_temp;
+
+ LFDS710_PAL_ASSERT( tns != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : logical_processor_number can be any value in its range
+ // TRD : windows_processor_group_inuse_flag can be any value in its range
+ // TRD : windows_processor_group_number can be any value in its range
+
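+ // TRD : create a new LP node and insert it into the parent node's ordered list of logical processor children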
+ libbenchmark_misc_pal_helper_new_topology_node( &tns_temp, ms );
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns_temp, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_LOGICAL_PROCESSOR_NUMBER( *tns_temp, logical_processor_number );
+
+ if( windows_processor_group_inuse_flag == RAISED )
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_WINDOWS_GROUP_NUMBER( *tns_temp, windows_processor_group_number );
+ else
+ LIBBENCHMARK_TOPOLOGY_NODE_UNSET_WINDOWS_GROUP_NUMBER( *tns_temp );
+
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( tns_temp->lasoe, tns_temp );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( tns_temp->lasoe, tns_temp );
+ lfds710_list_aso_insert( &tns->logical_processor_children, &tns_temp->lasoe, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SYSTEM );
+
+ libbenchmark_topology_insert( ts, tns );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t numa_node_id )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+ // TRD : numa_node_id can be any value in its range
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_NUMA_ID( *tns, numa_node_id );
+
+ libbenchmark_topology_insert( ts, tns );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SOCKET );
+
+ libbenchmark_topology_insert( ts, tns );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_PHYSICAL_PROCESSOR );
+
+ libbenchmark_topology_insert( ts, tns );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_cache_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t level,
+ enum libbenchmark_topology_node_cache_type type )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+ // TRD : level can be any value in its range
+ // TRD : type can be any value in its range
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_CACHE_LEVEL( *tns, level );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_CACHE_TYPE( *tns, type );
+
+ libbenchmark_topology_insert( ts, tns );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms,
+ lfds710_pal_uint_t logical_processor_number,
+ enum flag windows_processor_group_inuse_flag,
+ lfds710_pal_uint_t windows_processor_group_number )
+{
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : logical_processor_number can be any value in its range
+ // TRD : windows_processor_group_inuse_flag can be any value in its range
+ // TRD : windows_processor_group_number can be any value in its range
+
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( *tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_LOGICAL_PROCESSOR_NUMBER( *tns, logical_processor_number );
+
+ if( windows_processor_group_inuse_flag == RAISED )
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_WINDOWS_GROUP_NUMBER( *tns, windows_processor_group_number );
+ else
+ LIBBENCHMARK_TOPOLOGY_NODE_UNSET_WINDOWS_GROUP_NUMBER( *tns );
+
+ libbenchmark_topology_insert( ts, tns );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libbenchmark_misc_query( enum libbenchmark_misc_query query_type, void *query_input, void *query_output )
+{
+ // TRD : query_type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+ // TRD : query_output can be NULL in some cases
+
+ switch( query_type )
+ {
+ case LIBBENCHMARK_MISC_QUERY_GET_BUILD_AND_VERSION_STRING:
+ {
+ char static const
+ * const build_and_version_string = "libbenchmark " LIBBENCHMARK_MISC_VERSION_STRING " (" BUILD_TYPE_STRING ", " LIBBENCHMARK_PAL_OS_STRING ", " MODE_TYPE_STRING ")";
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(char const **) query_output = build_and_version_string;
+ }
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_porting_abstraction_layer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXPSP3 && NTDDI_VERSION < NTDDI_WIN7 )
+
+ #ifdef LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+ #error More than one porting abstraction layer matches current platform in "libbenchmark_porting_abstraction_layer_populate_topology.c".
+ #endif
+
+ #define LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+
+ static void internal_populate_logical_processor_array_from_bitmask( struct libshared_memory_state *ms, struct libbenchmark_topology_node_state *tns, lfds710_pal_uint_t bitmask );
+
+ int libbenchmark_porting_abstraction_layer_populate_topology( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms )
+ {
+ BOOL
+ brv;
+
+ DWORD
+ slpi_length = 0,
+ number_slpi,
+ loop;
+
+ enum libbenchmark_topology_node_cache_type
+ processor_cache_type_to_libbenchmark_topology_node_cache_type[3] =
+ {
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_UNIFIED, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_INSTRUCTION, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_DATA
+ };
+
+ int
+ rv = 1;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION
+ *slpi = NULL;
+
+ ULONG_PTR
+ mask;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ // TRD : obtain information from the OS
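+ /* TRD : the first call, with a NULL buffer, fails and writes the required buffer length in bytes into slpi_length
+ the buffer is then allocated and the call repeated to obtain the actual records
+ */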
+ brv = GetLogicalProcessorInformation( slpi, &slpi_length );
+ slpi = libshared_memory_alloc_from_most_free_space_node( ms, slpi_length, sizeof(lfds710_pal_uint_t) );
+ brv = GetLogicalProcessorInformation( slpi, &slpi_length );
+ number_slpi = slpi_length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+
+ /* TRD : we loop twice over the topology information
+ first time we form up the system node
+ and add that
+ second time, we do everything else
+ */
+
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+
+ for( loop = 0 ; loop < number_slpi ; loop++ )
+ if( (slpi+loop)->Relationship == RelationNumaNode )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) (slpi+loop)->ProcessorMask );
+
+ libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( ts, tns );
+
+ for( loop = 0 ; loop < number_slpi ; loop++ )
+ {
+ if( (slpi+loop)->Relationship == RelationNumaNode )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) ((slpi+loop)->ProcessorMask) );
+ libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) (slpi+loop)->NumaNode.NodeNumber );
+
+ // TRD : add each LP as an individual LP node
+ for( mask = 1 ; mask != 0 ; mask <<= 1 )
+ if( ((slpi+loop)->ProcessorMask & mask) == mask )
+ libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( ts, ms, (lfds710_pal_uint_t) ((slpi+loop)->ProcessorMask & mask), LOWERED, 0 );
+ }
+
+ if( (slpi+loop)->Relationship == RelationProcessorPackage )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) ((slpi+loop)->ProcessorMask) );
+ libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( ts, tns );
+ }
+
+ if( (slpi+loop)->Relationship == RelationProcessorCore )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) ((slpi+loop)->ProcessorMask) );
+ libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( ts, tns );
+ }
+
+ if( (slpi+loop)->Relationship == RelationCache )
+ {
+ if( (slpi+loop)->Cache.Type == CacheUnified or (slpi+loop)->Cache.Type == CacheInstruction or (slpi+loop)->Cache.Type == CacheData )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) (slpi+loop)->ProcessorMask );
+ libbenchmark_misc_pal_helper_add_cache_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) (slpi+loop)->Cache.Level, processor_cache_type_to_libbenchmark_topology_node_cache_type[(slpi+loop)->Cache.Type] );
+ }
+ }
+ }
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static void internal_populate_logical_processor_array_from_bitmask( struct libshared_memory_state *ms,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t bitmask )
+ {
+ lfds710_pal_uint_t
+ logical_processor_number = 1;
+
+ struct libbenchmark_topology_node_state
+ *tns_temp;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+ // TRD : bitmask can be any value in its range
+
+ /* TRD : iterate over the bits in the bitmask
+ each is a LP number
+ add every LP to *tns
+ */
+
+ while( bitmask != 0 )
+ {
+ if( bitmask & 0x1 )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, logical_processor_number, LOWERED, 0 );
+
+ bitmask >>= 1;
+ logical_processor_number++;
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WIN7 )
+
+ #ifdef LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+ #error More than one porting abstraction layer matches current platform in "libbenchmark_porting_abstraction_layer_populate_topology.c".
+ #endif
+
+ #define LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+
+ static int numa_node_id_to_numa_node_id_compare_function( void const *new_key, void const *existing_key );
+ static void nna_cleanup( struct lfds710_btree_au_state *baus, struct lfds710_btree_au_element *baue );
+ static void internal_populate_logical_processor_array_from_bitmask( struct libshared_memory_state *ms, struct libbenchmark_topology_node_state *tns, lfds710_pal_uint_t windows_processor_group_number, lfds710_pal_uint_t bitmask );
+
+ int libbenchmark_porting_abstraction_layer_populate_topology( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms )
+ {
+ BOOL
+ brv;
+
+ DWORD
+ offset = 0,
+ slpie_length = 0,
+ subloop;
+
+ /*
+ enum libbenchmark_topology_node_cache_type
+ processor_cache_type_to_libbenchmark_topology_node_cache_type[3] =
+ {
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_UNIFIED, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_INSTRUCTION, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_DATA
+ };
+ */
+
+ int
+ rv = 1;
+
+ KAFFINITY
+ bitmask;
+
+ lfds710_pal_uint_t
+ logical_processor_number;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct lfds710_btree_au_state
+ nna_tree_state;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
+ *slpie,
+ *slpie_buffer = NULL;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ // TRD : obtain information from the OS
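+ /* TRD : as with the non-Ex call, the first call fails and writes the required buffer length into slpie_length
+ the buffer is then allocated and the call repeated - but note the returned records are
+ variable-length and must be walked using each record's Size member (see the comment below)
+ */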
+ brv = GetLogicalProcessorInformationEx( RelationAll, slpie_buffer, &slpie_length );
+ slpie_buffer = libshared_memory_alloc_from_most_free_space_node( ms, slpie_length, sizeof(lfds710_pal_uint_t) );
+ brv = GetLogicalProcessorInformationEx( RelationAll, slpie_buffer, &slpie_length );
+
+ /* TRD : this API from MS is absolutely bloody appalling
+ staggeringly and completely needlessly complex and inadequately documented
+ I think I've found at least one design flaw
+ and I'm inferring from the C structures a good deal of what's presumably going on
+ where the docs just don't say
+
+ (addendum - I've just found another huge fucking issue which has wasted two fucking days of my time
+ the original non-Ex() API returns an actual C array, where the elements are structs, which contain
+ a union, but in C the struct is sized to the max size of the union, so you can iterate over the array
+
+ the NEW version, in the docs still says "array", but it actually returns a PACKED "array" (not an
+ array, because you can't iterate over it) where the each element now has a Size member - you need
+ to move your pointer by the number of bytes in Size - this is NOT in the docs, there is NO example
+ code, and the ONLY WAY YOU CAN GUESS IS TO NOTICE THERE IS A SIZE MEMBER IN THE NEW STRUCT)
+
+ (for example, just found a one-liner buried in the note on a particular structure
+ returned for a particular node type;
+
+ "If the PROCESSOR_RELATIONSHIP structure represents a processor core, the GroupCount member is always 1."
+
+ this *implies* that a physical core is never split across groups
+ this is a very important fact, if you're trying to work with this fucking API
+ but it's not actually SPECIFICALLY STATED
+ it's only implied - and so I do not feel confident in it
+ and the appalling design and appallingly low quality of the docs in general hardly gives me confidence
+ to just go ahead and believe in anything I find written - let alone something which is, offhand, just
+ implied, buried in some structure notes somewhere
+ this is how it is all the way across this entire bloody API
+ another example is that LPs are not actually returned by the API
+ I'm *inferring* I can get the full list by taking the LP masks presented by the NUMA nodes
+ it's *not* documented - i.e. it's not documented HOW TO GET THE LIST OF LOGICAL PROCESSORS IN THE SYSTEM
+ fucking christ...!)
+
+ I'm absolutely 100% certain my use of the API is not fully correct
+ but I have no way to find out
+ MS are bloody idiots - the "processor group" concept is absolutely and utterly crazy
+ and it complicates *everything* by a power of 2
+ rather than simply iterating over the records provided,
+ where just about any entity in the system (NUMA node, processor socket, etc)
+ can have multiple records being returned, I have in fact to iterate over the whole
+ record set, accumulating the multiple records, so I can FINALLY find out the full
+ logical processor set for any given entity, so I can THEN, FINALLY, insert the entity
+ into the topology tree
+ i.e. for any given node, you have to fully iterate the list of records provided by
+ the OS, to actually be sure you know all the LPs for that node
+ there is no single-entity/single-record lookup or relationship
+ MS -> you are bloody idiots; this is appalling, OBVIOUSLY appalling, and whoever
+ designed it, and ESPECIALLY whoever APPROVED It, needs not only to be fired, but SHOT
+
+ as ever with MS, something that takes a few minutes in Linux takes bloody hours with MS
+
+ note due to aforementioned design flaw, it is not possible to collect cache information
+ the problem is that if we have a cache which spans multiple processor groups, there will
+ be multiple records (or I presume there will be - I'm inferring), BUT, looking at the
+ structures, it's not possible to know these are *the same cache*
+
+ so, this mess;
+
+ 1. RelationNumaNode
+ - we need to loop over the full list of records to accumulate the full set of LPs for each NUMA node
+ then we can add the record to the btree
+ 2. RelationGroup
+ - really REALLY don't care - with prejudice
+ 3. RelationProcessorPackage
+ - bizarrely, actually does the right thing (as far as it can be right in this sorry mess) and contains
+ the full list of group IDs it belongs to, and the full list of LP IDs within each group
+ so we can iterate once over the full set of records and insert this record type directly
+ 4. RelationProcessorCore
+ - same as RelationProcessorPackage
+ 5. RelationCache
+ - seems fubared; provides a single processor group and a single mask of LPs, and so if a cache spans
+ multiple processor groups, we'll get multiple records for it - problem is, we've no way of knowing
+ *it's the same cache*
+ we get away with this with NUMA because each node has an ID
+ the next best thing is going to be to record the details of the cache from the structure
+ (level, associativity, etc) and match based on that
+ God I hate Microsoft
+ */
+
+ // TRD : iterate once for system node
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+
+ while( offset < slpie_length )
+ {
+ slpie = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *) ( (char unsigned *) slpie_buffer + offset );
+
+ offset += slpie->Size;
+
+ if( slpie->Relationship == RelationNumaNode )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) (slpie->NumaNode.GroupMask.Group), (lfds710_pal_uint_t) (slpie->NumaNode.GroupMask.Mask) );
+ }
+
+ libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( ts, tns );
+
+ // TRD : iterate again for everything else
+    lfds710_btree_au_init_valid_on_current_logical_core( &nna_tree_state, numa_node_id_to_numa_node_id_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, ts );
+
+ offset = 0;
+
+ while( offset < slpie_length )
+ {
+ slpie = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *) ( (char unsigned *) slpie_buffer + offset );
+
+ offset += slpie->Size;
+
+ if( slpie->Relationship == RelationNumaNode )
+ {
+ /* TRD : now for the first madness - accumulate the NUMA node records
+
+ first, try to find this node in nna_tree_state
+                 if it's there, we use it - if not, we make it and add it, and use it
+ once we've got a node to work with, we add the current list of LPs to that node
+ */
+
+ rv = lfds710_btree_au_get_by_key( &nna_tree_state, NULL, (void *) &slpie->NumaNode.NodeNumber, &baue );
+
+ if( rv == 0 )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ baue = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_btree_au_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( *baue, (void *) &slpie->NumaNode.NodeNumber );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( *baue, tns );
+ lfds710_btree_au_insert( &nna_tree_state, baue, NULL );
+ }
+
+ // TRD : baue now points at the correct node
+ tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->NumaNode.GroupMask.Group, (lfds710_pal_uint_t) slpie->NumaNode.GroupMask.Mask );
+
+        // TRD : now add all LPs from this NUMA node to the tree
+ logical_processor_number = 0;
+ bitmask = slpie->NumaNode.GroupMask.Mask;
+
+ while( bitmask != 0 )
+ {
+ if( bitmask & 0x1 )
+ libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( ts, ms, logical_processor_number, RAISED, (slpie->NumaNode.GroupMask.Group) );
+
+ bitmask >>= 1;
+ logical_processor_number++;
+ }
+ }
+
+ if( slpie->Relationship == RelationGroup )
+ {
+ // TRD : we don't care about this - actually, we do care, we really REALLY hate this
+ }
+
+ if( slpie->Relationship == RelationProcessorPackage )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( subloop = 0 ; subloop < slpie->Processor.GroupCount ; subloop++ )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Group, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Mask );
+ libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( ts, tns );
+ }
+
+ if( slpie->Relationship == RelationProcessorCore )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( subloop = 0 ; subloop < slpie->Processor.GroupCount ; subloop++ )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Group, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Mask );
+ libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( ts, tns );
+ }
+
+ /*
+ if( slpie->Relationship == RelationCache )
+ {
+ if( slpie->Cache.Type == CacheUnified or slpie->Cache.Type == CacheInstruction or slpie->Cache.Type == CacheData )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->ProcessorMask );
+ libbenchmark_misc_pal_helper_add_cache_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) slpie->Cache.Level, processor_cache_type_to_libbenchmark_topology_node_cache_type[slpie->Cache.Type] );
+ }
+ }
+ */
+ }
+
+ /* TRD : now finally insert the built-up NUMA and cache records
+ we call cleanup() on the accumulator tree - it's safe to re-use the nodes as they're emitted to the cleanup function
+ so we then throw them into the topology_state tree
+ */
+
+ lfds710_btree_au_cleanup( &nna_tree_state, nna_cleanup );
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static int numa_node_id_to_numa_node_id_compare_function( void const *new_key, void const *existing_key )
+ {
+ int
+ cr = 0;
+
+ DWORD
+ numa_node_id_existing,
+ numa_node_id_new;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ numa_node_id_new = *(DWORD *) new_key;
+ numa_node_id_existing = *(DWORD *) existing_key;
+
+ if( numa_node_id_new < numa_node_id_existing )
+ cr = -1;
+
+ if( numa_node_id_new > numa_node_id_existing )
+ cr = 1;
+
+ return cr;
+ }
+
+ /****************************************************************************/
+ static void nna_cleanup( struct lfds710_btree_au_state *baus, struct lfds710_btree_au_element *baue )
+ {
+ DWORD
+ *numa_node_id;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ struct libbenchmark_topology_state
+ *ts;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ ts = LFDS710_BTREE_AU_GET_USER_STATE_FROM_STATE( *baus );
+ numa_node_id = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+ tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+
+ libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) *numa_node_id );
+
+ return;
+ }
+
+ /****************************************************************************/
+ static void internal_populate_logical_processor_array_from_bitmask( struct libshared_memory_state *ms,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t windows_processor_group_number,
+ lfds710_pal_uint_t bitmask )
+ {
+ lfds710_pal_uint_t
+ logical_processor_number = 0;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+ // TRD : windows_processor_group_number can be any value in its range
+ // TRD : bitmask can be any value in its range
+
+ /* TRD : iterate over the bits in the bitmask
+ each is a LP number
+ add every LP to *tns
+ */
+
+ while( bitmask != 0 )
+ {
+ if( bitmask & 0x1 )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, logical_processor_number, RAISED, windows_processor_group_number );
+
+ bitmask >>= 1;
+ logical_processor_number++;
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 )
+
+ #ifdef LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+ #error More than one porting abstraction layer matches current platform in "libbenchmark_porting_abstraction_layer_populate_topology.c".
+ #endif
+
+ #define LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+
+ int libbenchmark_porting_abstraction_layer_populate_topology( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms )
+ {
+ CCHAR
+ loop;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ /* TRD : prior to Windows 7 there is no way to enumerate CPU topology
+ all that is available is a count of the number of logical cores, KeNumberProcessors
+             this is in fact only available *up to Vista SP1*... Windows 7 provides full functionality to get topology,
+             so it's not clear what should be done after Vista SP1 but before Windows 7...
+
+ as such to get the topology actually right, the user has to hardcode it
+
+ the best general solution seems to be to take the number of logical cores
+             and assume they're all on one processor and that there's one NUMA node
+ */
+
+ // TRD : create the system node, populate and insert
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( loop = 0 ; loop < KeNumberProcessors ; loop++ )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, loop, LOWERED, 0 );
+ libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( ts, tns );
+
+ // TRD : create the NUMA node, populate and insert
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( loop = 0 ; loop < KeNumberProcessors ; loop++ )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, loop, LOWERED, 0 );
+ libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( ts, tns, 0 );
+
+ // TRD : create the socket node, populate and insert
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( loop = 0 ; loop < KeNumberProcessors ; loop++ )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, loop, LOWERED, 0 );
+ libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( ts, tns );
+
+ // TRD : create the physical processor node, populate and insert
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( loop = 0 ; loop < KeNumberProcessors ; loop++ )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, loop, LOWERED, 0 );
+ libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( ts, tns );
+
+ // TRD : create the logical processor nodes, populate and insert
+ for( loop = 0 ; loop < KeNumberProcessors ; loop++ )
+ libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( ts, ms, loop, LOWERED, 0 );
+
+ return 1;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WIN7 )
+
+ #ifdef LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+ #error More than one porting abstraction layer matches current platform in "libbenchmark_porting_abstraction_layer_populate_topology.c".
+ #endif
+
+ #define LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+
+ static int numa_node_id_to_numa_node_id_compare_function( void const *new_key, void const *existing_key );
+ static void nna_cleanup( struct lfds710_btree_au_state *baus, struct lfds710_btree_au_element *baue );
+ static void internal_populate_logical_processor_array_from_bitmask( struct libshared_memory_state *ms, struct libbenchmark_topology_node_state *tns, lfds710_pal_uint_t windows_processor_group_number, lfds710_pal_uint_t bitmask );
+
+ int libbenchmark_porting_abstraction_layer_populate_topology( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms )
+ {
+ /*
+ enum libbenchmark_topology_node_cache_type
+ processor_cache_type_to_libbenchmark_topology_node_cache_type[3] =
+ {
+ LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_UNIFIED, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_INSTRUCTION, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_DATA
+ };
+ */
+
+ int
+ rv = 1;
+
+ KAFFINITY
+ bitmask;
+
+ lfds710_pal_uint_t
+ logical_processor_number;
+
+ NTSTATUS
+ brv;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct lfds710_btree_au_state
+ nna_tree_state;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
+ *slpie,
+ *slpie_buffer = NULL;
+
+ ULONG
+ offset = 0,
+ slpie_length = 0,
+ subloop;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ // TRD : obtain information from the OS
+ brv = KeQueryLogicalProcessorRelationship( NULL, RelationAll, slpie_buffer, &slpie_length );
+ slpie_buffer = libshared_memory_alloc_from_most_free_space_node( ms, slpie_length, sizeof(lfds710_pal_uint_t) );
+ brv = KeQueryLogicalProcessorRelationship( NULL, RelationAll, slpie_buffer, &slpie_length );
+
+ /* TRD : this API from MS is absolutely bloody appalling
+ staggeringly and completely needlessly complex and inadequately documented
+ I think I've found at least one design flaw
+ and I'm inferring from the C structures a good deal of what's presumably going on
+ where the docs just don't say
+
+ (addendum - I've just found another huge fucking issue which has wasted two fucking days of my time
+ the original non-Ex() API returns an actual C array, where the elements are structs, which contain
+ a union, but in C the struct is sized to the max size of the union, so you can iterate over the array
+
+ the NEW version, in the docs still says "array", but it actually returns a PACKED "array" (not an
+             array, because you can't iterate over it) where each element now has a Size member - you need
+ to move your pointer by the number of bytes in Size - this is NOT in the docs, there is NO example
+ code, and the ONLY WAY YOU CAN GUESS IS TO NOTICE THERE IS A SIZE MEMBER IN THE NEW STRUCT)
+
+ (for example, just found a one-liner buried in the note on a particular structure
+ returned for a particular node type;
+
+ "If the PROCESSOR_RELATIONSHIP structure represents a processor core, the GroupCount member is always 1."
+
+ this *implies* that a physical core is never split across groups
+ this is a very important fact, if you're trying to work with this fucking API
+ but it's not actually SPECIFICALLY STATED
+ it's only implied - and so I do not feel confident in it
+             and the appalling design and appallingly low quality of the docs in general hardly give me confidence
+             to just go ahead and believe in anything I find written - let alone something which is, offhand, just
+             implied, buried in some structure notes somewhere
+ this is how it is all the way across this entire bloody API
+ another example is that LPs are not actually returned by the API
+ I'm *inferring* I can get the full list by taking the LP masks presented by the NUMA nodes
+ it's *not* documented - i.e. it's not documented HOW TO GET THE LIST OF LOGICAL PROCESSORS IN THE SYSTEM
+ fucking christ...!)
+
+ I'm absolutely 100% certain my use of the API is not fully correct
+ but I have no way to find out
+ MS are bloody idiots - the "processor group" concept is absolutely and utterly crazy
+ and it complicates *everything* by a power of 2
+ rather than simply iterating over the records provided,
+ where just about any entity in the system (NUMA node, processor socket, etc)
+ can have multiple records being returned, I have in fact to iterate over the whole
+ record set, accumulating the multiple records, so I can FINALLY find out the full
+ logical processor set for any given entity, so I can THEN, FINALLY, insert the entity
+             into the topology tree
+ i.e. for any given node, you have to fully iterate the list of records provided by
+ the OS, to actually know you know all the LPs for that node
+ there is no single-entity/single-record lookup or relationship
+ MS -> you are bloody idiots; this is appalling, OBVIOUSLY appalling, and whoever
+             designed it, and ESPECIALLY whoever APPROVED IT, needs not only to be fired, but SHOT
+
+ as ever with MS, something that takes a few minutes in Linux takes bloody hours with MS
+
+ note due to aforementioned design flaw, it is not possible to collect cache information
+ the problem is that if we have a cache which spans multiple processor groups, there will
+             be multiple records (or I presume there will be - I'm inferring), BUT, looking at the
+ structures, it's not possible to know these are *the same cache*
+
+ so, this mess;
+
+ 1. RelationNumaNode
+ - we need to loop over the full list of records to accumulate the full set of LPs for each NUMA node
+                  then we can add the record to the btree
+ 2. RelationGroup
+ - really REALLY don't care - with prejudice
+ 3. RelationProcessorPackage
+ - bizarrely, actually does the right thing (as far as it can be right in this sorry mess) and contains
+ the full list of group IDs it belongs to, and the full list of LP IDs within each group
+ so we can iterate once over the full set of records and insert this record type directly
+ 4. RelationProcessorCore
+ - same as RelationProcessorPackage
+ 5. RelationCache
+                - seems fubared; provides a single processor group and a single mask of LPs, and so if a cache spans
+                  multiple processor groups, we'll get multiple records for it - problem is, we've no way of knowing
+                  *it's the same cache*
+                  we get away with this with NUMA because each node has an ID
+                  the next best thing is going to be to record the details of the cache from the structure
+ (level, associativity, etc) and match based on that
+ God I hate Microsoft
+ */
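+
+    /* a minimal sketch of the "match on the cache's details" idea floated above - illustration only,
+       nothing below uses it, and the function name is made up for the sketch; the guess is that two
+       RelationCache records presumably describe the same cache if their descriptor fields all match,
+       although the docs don't confirm this and two genuinely distinct but identically-specified
+       caches would be conflated, which is exactly the unresolved problem
+
+         int presumably_same_cache( CACHE_RELATIONSHIP const *a, CACHE_RELATIONSHIP const *b )
+         {
+           return( a->Level == b->Level and
+                   a->Associativity == b->Associativity and
+                   a->LineSize == b->LineSize and
+                   a->CacheSize == b->CacheSize and
+                   a->Type == b->Type );
+         }
+    */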
+
+ // TRD : iterate once for system node
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+
+ while( offset < slpie_length )
+ {
+ slpie = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *) ( (char unsigned *) slpie_buffer + offset );
+
+ offset += slpie->Size;
+
+ if( slpie->Relationship == RelationNumaNode )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) (slpie->NumaNode.GroupMask.Group), (lfds710_pal_uint_t) (slpie->NumaNode.GroupMask.Mask) );
+ }
+
+ libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( ts, tns );
+
+ // TRD : iterate again for everything else
+    lfds710_btree_au_init_valid_on_current_logical_core( &nna_tree_state, numa_node_id_to_numa_node_id_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, ts );
+
+ offset = 0;
+
+ while( offset < slpie_length )
+ {
+ slpie = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *) ( (char unsigned *) slpie_buffer + offset );
+
+ offset += slpie->Size;
+
+ if( slpie->Relationship == RelationNumaNode )
+ {
+ /* TRD : now for the first madness - accumulate the NUMA node records
+
+ first, try to find this node in nna_tree_state
+                 if it's there, we use it - if not, we make it and add it, and use it
+ once we've got a node to work with, we add the current list of LPs to that node
+ */
+
+ rv = lfds710_btree_au_get_by_key( &nna_tree_state, NULL, (void *) &slpie->NumaNode.NodeNumber, &baue );
+
+ if( rv == 0 )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ baue = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct lfds710_btree_au_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( *baue, (void *) &slpie->NumaNode.NodeNumber );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( *baue, tns );
+ lfds710_btree_au_insert( &nna_tree_state, baue, NULL );
+ }
+
+ // TRD : baue now points at the correct node
+ tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->NumaNode.GroupMask.Group, (lfds710_pal_uint_t) slpie->NumaNode.GroupMask.Mask );
+
+        // TRD : now add all LPs from this NUMA node to the tree
+ logical_processor_number = 0;
+ bitmask = slpie->NumaNode.GroupMask.Mask;
+
+ while( bitmask != 0 )
+ {
+ if( bitmask & 0x1 )
+ libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( ts, ms, logical_processor_number, RAISED, (slpie->NumaNode.GroupMask.Group) );
+
+ bitmask >>= 1;
+ logical_processor_number++;
+ }
+ }
+
+ if( slpie->Relationship == RelationGroup )
+ {
+ // TRD : we don't care about this - actually, we do care, we really REALLY hate this
+ }
+
+ if( slpie->Relationship == RelationProcessorPackage )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( subloop = 0 ; subloop < slpie->Processor.GroupCount ; subloop++ )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Group, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Mask );
+ libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( ts, tns );
+ }
+
+ if( slpie->Relationship == RelationProcessorCore )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ for( subloop = 0 ; subloop < slpie->Processor.GroupCount ; subloop++ )
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Group, (lfds710_pal_uint_t) slpie->Processor.GroupMask[subloop].Mask );
+ libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( ts, tns );
+ }
+
+ /*
+ if( slpie->Relationship == RelationCache )
+ {
+ if( slpie->Cache.Type == CacheUnified or slpie->Cache.Type == CacheInstruction or slpie->Cache.Type == CacheData )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_bitmask( ms, tns, (lfds710_pal_uint_t) slpie->ProcessorMask );
+ libbenchmark_misc_pal_helper_add_cache_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) slpie->Cache.Level, processor_cache_type_to_libbenchmark_topology_node_cache_type[slpie->Cache.Type] );
+ }
+ }
+ */
+ }
+
+ /* TRD : now finally insert the built-up NUMA and cache records
+ we call cleanup() on the accumulator tree - it's safe to re-use the nodes as they're emitted to the cleanup function
+ so we then throw them into the topology_state tree
+ */
+
+ lfds710_btree_au_cleanup( &nna_tree_state, nna_cleanup );
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static int numa_node_id_to_numa_node_id_compare_function( void const *new_key, void const *existing_key )
+ {
+ int
+ cr = 0;
+
+ ULONG
+ numa_node_id_existing,
+ numa_node_id_new;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ numa_node_id_new = *(ULONG *) new_key;
+ numa_node_id_existing = *(ULONG *) existing_key;
+
+ if( numa_node_id_new < numa_node_id_existing )
+ cr = -1;
+
+ if( numa_node_id_new > numa_node_id_existing )
+ cr = 1;
+
+ return cr;
+ }
+
+ /****************************************************************************/
+ static void nna_cleanup( struct lfds710_btree_au_state *baus, struct lfds710_btree_au_element *baue )
+ {
+ ULONG
+ *numa_node_id;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ struct libbenchmark_topology_state
+ *ts;
+
+ LFDS710_PAL_ASSERT( baus != NULL );
+ LFDS710_PAL_ASSERT( baue != NULL );
+
+ ts = LFDS710_BTREE_AU_GET_USER_STATE_FROM_STATE( *baus );
+ numa_node_id = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+ tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+
+ libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) *numa_node_id );
+
+ return;
+ }
+
+ /****************************************************************************/
+ static void internal_populate_logical_processor_array_from_bitmask( struct libshared_memory_state *ms,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t windows_processor_group_number,
+ lfds710_pal_uint_t bitmask )
+ {
+ lfds710_pal_uint_t
+ logical_processor_number = 0;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+ // TRD : windows_processor_group_number can be any value in its range
+ // TRD : bitmask can be any value in its range
+
+ /* TRD : iterate over the bits in the bitmask
+ each is a LP number
+ add every LP to *tns
+ */
+
+ while( bitmask != 0 )
+ {
+ if( bitmask & 0x1 )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, logical_processor_number, RAISED, windows_processor_group_number );
+
+ bitmask >>= 1;
+ logical_processor_number++;
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE && defined __STDC__ && __STDC_HOSTED__ == 1 )
+
+ #ifdef LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+ #error More than one porting abstraction layer matches current platform in "libbenchmark_porting_abstraction_layer_populate_topology.c".
+ #endif
+
+ #define LIBBENCHMARK_PAL_POPULATE_TOPOLOGY
+
+ static void internal_populate_logical_processor_array_from_path_to_csv_hex( struct libshared_memory_state *ms,
+ struct libbenchmark_topology_node_state *tns,
+ char *path_to_csv_hex );
+ static int internal_verify_paths( lfds710_pal_uint_t number_paths, ... );
+ static void internal_read_string_from_path( char *path, char *string );
+
+ /****************************************************************************/
+ int libbenchmark_porting_abstraction_layer_populate_topology( struct libbenchmark_topology_state *ts,
+ struct libshared_memory_state *ms )
+ {
+ char
+ numa_node_path[128],
+ thread_siblings_path[128],
+ core_siblings_path[128],
+ cache_level_path[128],
+ cache_type_path[128],
+ shared_cpu_map_path[128],
+ cache_level_string[16],
+ cache_type_string[16];
+
+ int
+ rv = 1,
+ cache_type_string_to_type_enum_lookup[NUMBER_UPPERCASE_LETTERS_IN_LATIN_ALPHABET] =
+ {
+ -1, -1, -1, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_DATA, -1, -1, -1, -1, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_INSTRUCTION, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_UNIFIED, -1, -1, -1, -1
+ };
+
+ int long long unsigned
+ level_temp;
+
+ lfds710_pal_uint_t
+ numa_node = 0,
+ cpu_number = 0,
+ index_number,
+ level,
+ type;
+
+ struct libbenchmark_topology_iterate_state
+ tis;
+
+ struct libbenchmark_topology_node_state
+ *tns,
+ *tns_lp;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ sprintf( numa_node_path, "/sys/devices/system/node/node%llu/cpumap", (int long long unsigned) numa_node );
+
+ while( internal_verify_paths(1, numa_node_path) )
+ {
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_path_to_csv_hex( ms, tns, numa_node_path );
+ libbenchmark_misc_pal_helper_add_numa_node_to_topology_tree( ts, tns, (lfds710_pal_uint_t) numa_node );
+ sprintf( numa_node_path, "/sys/devices/system/node/node%llu/cpumap", (int long long unsigned) (++numa_node) );
+ }
+
+ sprintf( thread_siblings_path, "/sys/devices/system/cpu/cpu%llu/topology/thread_siblings", (int long long unsigned) cpu_number );
+ sprintf( core_siblings_path, "/sys/devices/system/cpu/cpu%llu/topology/core_siblings", (int long long unsigned) cpu_number );
+
+ while( internal_verify_paths(2, core_siblings_path, thread_siblings_path) )
+ {
+ libbenchmark_misc_pal_helper_add_logical_processor_node_to_topology_tree( ts, ms, cpu_number, LOWERED, 0 );
+
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_path_to_csv_hex( ms, tns, thread_siblings_path );
+ libbenchmark_misc_pal_helper_add_physical_processor_node_to_topology_tree( ts, tns );
+
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_path_to_csv_hex( ms, tns, core_siblings_path );
+ libbenchmark_misc_pal_helper_add_socket_node_to_topology_tree( ts, tns );
+
+ index_number = 0;
+
+ sprintf( cache_level_path, "/sys/devices/system/cpu/cpu%llu/cache/index%llu/level", (int long long unsigned) cpu_number, (int long long unsigned) index_number );
+ sprintf( cache_type_path, "/sys/devices/system/cpu/cpu%llu/cache/index%llu/type", (int long long unsigned) cpu_number, (int long long unsigned) index_number );
+ sprintf( shared_cpu_map_path, "/sys/devices/system/cpu/cpu%llu/cache/index%llu/shared_cpu_map", (int long long unsigned) cpu_number, (int long long unsigned) index_number );
+
+ while( internal_verify_paths(3, cache_level_path, cache_type_path, shared_cpu_map_path) )
+ {
+ internal_read_string_from_path( cache_level_path, cache_level_string );
+ sscanf( cache_level_string, "%llx", &level_temp );
+ level = (lfds710_pal_uint_t) level_temp;
+
+ internal_read_string_from_path( cache_type_path, cache_type_string );
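+        // the sysfs "type" file contains "Data", "Instruction" or "Unified" - the lookup table is indexed by the first letter minus 'A'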
+ type = cache_type_string_to_type_enum_lookup[(int)(*cache_type_string - 'A')];
+
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ internal_populate_logical_processor_array_from_path_to_csv_hex( ms, tns, shared_cpu_map_path );
+ libbenchmark_misc_pal_helper_add_cache_node_to_topology_tree( ts, tns, level, type );
+
+ index_number++;
+
+ sprintf( cache_level_path, "/sys/devices/system/cpu/cpu%llu/cache/index%llu/level", (int long long unsigned) cpu_number, (int long long unsigned) index_number );
+ sprintf( cache_type_path, "/sys/devices/system/cpu/cpu%llu/cache/index%llu/type", (int long long unsigned) cpu_number, (int long long unsigned) index_number );
+ sprintf( shared_cpu_map_path, "/sys/devices/system/cpu/cpu%llu/cache/index%llu/shared_cpu_map", (int long long unsigned) cpu_number, (int long long unsigned) index_number );
+ }
+
+ cpu_number++;
+
+ sprintf( thread_siblings_path, "/sys/devices/system/cpu/cpu%llu/topology/thread_siblings", (int long long unsigned) cpu_number );
+ sprintf( core_siblings_path, "/sys/devices/system/cpu/cpu%llu/topology/core_siblings", (int long long unsigned) cpu_number );
+ }
+
+ // TRD : now make and populate the notional system node
+ libbenchmark_misc_pal_helper_new_topology_node( &tns, ms );
+ libbenchmark_topology_iterate_init( &tis, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR );
+ while( libbenchmark_topology_iterate(ts, &tis, &tns_lp) )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER(*tns_lp), LOWERED, 0 );
+ libbenchmark_misc_pal_helper_add_system_node_to_topology_tree( ts, tns );
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ void libbenchmark_porting_abstraction_layer_topology_node_cleanup( struct libbenchmark_topology_node_state *tns )
+ {
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ lfds710_list_aso_cleanup( &tns->logical_processor_children, NULL );
+
+ return;
+ }
+
+ /****************************************************************************/
+ static void internal_populate_logical_processor_array_from_path_to_csv_hex( struct libshared_memory_state *ms,
+ struct libbenchmark_topology_node_state *tns,
+ char *path_to_csv_hex )
+ {
+ char
+ diskbuffer[BUFSIZ],
+ string[1024];
+
+ FILE
+ *diskfile;
+
+ int
+ loop;
+
+ int unsigned
+ logical_processor_foursome,
+ logical_processor_number = 0,
+ subloop;
+
+ lfds710_pal_uint_t
+ length = 0;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+ LFDS710_PAL_ASSERT( path_to_csv_hex != NULL );
+
+    /* TRD : we're passed the path to a file which contains the bitmask string
+             open the file, read the string, parse the string
+             the string consists of 32-bit bitmasks in hex separated by commas
+             no leading or trailing commas
+    */
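+
+    /* an illustrative example (made-up contents, not from any particular machine) - a cpumap or
+       shared_cpu_map line of "00000000,00000003" is parsed right-to-left, four logical processors
+       per hex digit, commas skipped, and so indicates logical processors 0 and 1
+    */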
+
+ diskfile = fopen( path_to_csv_hex, "r" );
+ setbuf( diskfile, diskbuffer );
+ fgets( string, 1024, diskfile );
+ fclose( diskfile );
+
+ while( string[length++] != '\0' );
+
+ length -= 2;
+
+ for( loop = ((int)length)-1 ; loop > -1 ; loop-- )
+ {
+ if( string[loop] == ',' )
+ continue;
+
+ sscanf( &string[loop], "%1x", &logical_processor_foursome );
+
+ for( subloop = 0 ; subloop < 4 ; subloop++ )
+ if( ( (logical_processor_foursome >> subloop) & 0x1 ) == 0x1 )
+ libbenchmark_misc_pal_helper_add_logical_processor_to_topology_node( tns, ms, logical_processor_number + subloop, LOWERED, 0 );
+
+ logical_processor_number += 4;
+ }
+
+ return;
+ }
+
+ /****************************************************************************/
+ static int internal_verify_paths( lfds710_pal_uint_t number_paths, ... )
+ {
+ FILE
+ *diskfile;
+
+ int
+ rv = 1;
+
+ lfds710_pal_uint_t
+ count = 0;
+
+ va_list
+ va;
+
+ // TRD : number_paths can be any value in its range
+
+ va_start( va, number_paths );
+
+ while( rv == 1 and count++ < number_paths )
+ if( NULL == (diskfile = fopen(va_arg(va,char *), "r")) )
+ rv = 0;
+ else
+ fclose( diskfile );
+
+ va_end( va );
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static void internal_read_string_from_path( char *path, char *string )
+ {
+ char
+ diskbuffer[BUFSIZ];
+
+ FILE
+ *diskfile;
+
+ LFDS710_PAL_ASSERT( path != NULL );
+ LFDS710_PAL_ASSERT( string != NULL );
+
+ diskfile = fopen( path, "r" );
+ setbuf( diskfile, diskbuffer );
+ fscanf( diskfile, "%s", string );
+ fclose( diskfile );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_POPULATE_TOPOLOGY )
+
+ #error No matching porting abstraction layer in "libbenchmark_porting_abstraction_layer_populate_topology.c".
+
+#endif
+
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_porting_abstraction_layer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER )
+ /* TRD : MSVC compiler
+
+ an unfortunately necessary hack for MSVC
+ MSVC only defines __STDC__ if /Za is given, where /Za turns off MSVC C extensions -
+ which prevents Windows header files from compiling.
+ */
+
+ #define __STDC__ 1
+ #define __STDC_HOSTED__ 1
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __STDC__ && __STDC_HOSTED__ == 1 && !defined KERNEL_MODE )
+
+ #define LIBBENCHMARK_PAL_PRINT_STRING
+
+ #include <stdio.h>
+
+ void libbenchmark_pal_print_string( char const * const string )
+ {
+ LFDS710_PAL_ASSERT( string != NULL );
+
+ printf( "%s", string );
+
+ fflush( stdout );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBBENCHMARK_PAL_PRINT_STRING )
+
+ #pragma warning( disable : 4100 )
+
+ void libbenchmark_pal_print_string( char const * const string )
+ {
+ LFDS710_PAL_ASSERT( string != NULL );
+
+ return;
+ }
+
+ #pragma warning( default : 4100 )
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libbenchmark_results_cleanup( struct libbenchmark_results_state *rs )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_result_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ rv;
+
+ struct libbenchmark_result
+ *rs_new,
+ *rs_existing;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ rs_new = (struct libbenchmark_result *) new_key;
+ rs_existing = (struct libbenchmark_result *) existing_key;
+
+ if( rs_new->datastructure_id > rs_existing->datastructure_id )
+ return 1;
+
+ if( rs_new->datastructure_id < rs_existing->datastructure_id )
+ return -1;
+
+ if( rs_new->benchmark_id > rs_existing->benchmark_id )
+ return 1;
+
+ if( rs_new->benchmark_id < rs_existing->benchmark_id )
+ return -1;
+
+ if( rs_new->lock_id > rs_existing->lock_id )
+ return 1;
+
+ if( rs_new->lock_id < rs_existing->lock_id )
+ return -1;
+
+ if( rs_new->numa_mode > rs_existing->numa_mode )
+ return 1;
+
+ if( rs_new->numa_mode < rs_existing->numa_mode )
+ return -1;
+
+ rv = libbenchmark_topology_node_compare_lpsets_function( rs_new->lpset, rs_existing->lpset );
+
+ if( rv != 0 )
+ return rv;
+
+ rv = libbenchmark_topology_node_compare_nodes_function( &rs_new->tns, &rs_existing->tns );
+
+  // TRD : for better or worse, whatever the node comparison returns is what we return :-)
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_results_get_result( struct libbenchmark_results_state *rs,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ enum libbenchmark_lock_id lock_id,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct lfds710_list_aso_state *lpset,
+ struct libbenchmark_topology_node_state *tns,
+ lfds710_pal_uint_t *result )
+{
+ int
+ rv;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct libbenchmark_result
+ *r,
+ search_key;
+
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : datastructure_id can be any value in its range
+ // TRD : benchmark_id can be any value in its range
+ // TRD : lock_id can be any value in its range
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( lpset != NULL );
+  LFDS710_PAL_ASSERT( tns != NULL );
+ LFDS710_PAL_ASSERT( result != NULL );
+
+ search_key.datastructure_id = datastructure_id;
+ search_key.benchmark_id = benchmark_id;
+ search_key.lock_id = lock_id;
+ search_key.numa_mode = numa_mode;
+ search_key.lpset = lpset;
+ search_key.tns = *tns;
+
+ rv = lfds710_btree_au_get_by_key( &rs->results_tree, NULL, &search_key, &baue );
+
+ if( rv == 1 )
+ {
+ r = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+ *result = r->result;
+ }
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_results_init( struct libbenchmark_results_state *rs,
+ struct libshared_memory_state *ms )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ lfds710_btree_au_init_valid_on_current_logical_core( &rs->results_tree, libbenchmark_result_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ rs->ms = ms;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_results_put_result( struct libbenchmark_results_state *rs,
+ enum libbenchmark_datastructure_id datastructure_id,
+ enum libbenchmark_benchmark_id benchmark_id,
+ enum libbenchmark_lock_id lock_id,
+ enum libbenchmark_topology_numa_mode numa_mode,
+ struct lfds710_list_aso_state *lpset,
+ lfds710_pal_uint_t logical_processor_number,
+ lfds710_pal_uint_t windows_logical_processor_group_number,
+ lfds710_pal_uint_t result )
+{
+ struct libbenchmark_result
+ *r;
+
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : datastructure_id can be any value in its range
+ // TRD : benchmark_id can be any value in its range
+ // TRD : lock_id can be any value in its range
+ // TRD : numa_mode can be any value in its range
+ LFDS710_PAL_ASSERT( lpset != NULL );
+ // TRD : logical_processor_number can be any value in its range
+ // TRD : windows_logical_processor_group_number can be any value in its range
+ // TRD : result can be any value in its range
+
+ r = libshared_memory_alloc_from_most_free_space_node( rs->ms, sizeof(struct libbenchmark_result), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ r->benchmark_id = benchmark_id;
+ r->datastructure_id = datastructure_id;
+ r->lock_id = lock_id;
+ r->numa_mode = numa_mode;
+ r->lpset = lpset;
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE( r->tns, LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_LOGICAL_PROCESSOR_NUMBER( r->tns, logical_processor_number );
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_WINDOWS_GROUP_NUMBER( r->tns, windows_logical_processor_group_number );
+ r->result = result;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( r->baue, r );
+
+ lfds710_btree_au_insert( &rs->results_tree, &r->baue, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libbenchmark_threadset_cleanup( struct libbenchmark_threadset_state *pts )
+{
+ LFDS710_PAL_ASSERT( pts != NULL );
+
+ // TRD : we do naaauuuuthin'
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_threadset_init( struct libbenchmark_threadset_state *tsets,
+ struct libbenchmark_topology_state *ts,
+ struct lfds710_list_aso_state *logical_processor_set,
+ struct libshared_memory_state *ms,
+ libshared_pal_thread_return_t (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_user_state ),
+ void *users_threadset_state )
+{
+ struct lfds710_list_aso_element
+ *lasoe = NULL;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_threadset_per_numa_state
+ *pns = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ struct libbenchmark_topology_node_state
+ *tns,
+ *tns_numa_node;
+
+ LFDS710_PAL_ASSERT( tsets != NULL );
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( logical_processor_set != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( thread_function != NULL );
+ // TRD : users_threadset_state can be NULL
+
+ tsets->threadset_start_flag = LOWERED;
+
+ tsets->thread_function = thread_function;
+ tsets->users_threadset_state = users_threadset_state;
+ lfds710_list_asu_init_valid_on_current_logical_core( &tsets->list_of_per_numa_states, NULL );
+ lfds710_list_asu_init_valid_on_current_logical_core( &tsets->list_of_per_thread_states, NULL );
+
+ /* TRD : loop over the logical_processor_set
+ make a thread_state for each
+ and make a NUMA node state for each unique NUMA node
+ */
+
+ while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*logical_processor_set,lasoe) )
+ {
+ tns = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ // TRD : first, make a NUMA node entry, if we need to
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR, tns, &tns_numa_node );
+
+ if( tns_numa_node != NULL )
+ {
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_numa_states,lasue) )
+ {
+        pns = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( pns->numa_node_id == LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*tns_numa_node) )
+ break;
+ }
+
+ if( lasue == NULL )
+ {
+ pns = libshared_memory_alloc_from_specific_node( ms, LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID(*tns_numa_node), sizeof(struct libbenchmark_threadset_per_numa_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ pns->numa_node_id = LIBBENCHMARK_TOPOLOGY_NODE_GET_NUMA_ID( *tns_numa_node );
+ pns->users_per_numa_state = NULL;
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( pns->lasue, pns );
+ lfds710_list_asu_insert_at_start( &tsets->list_of_per_numa_states, &pns->lasue );
+ }
+ }
+
+ // TRD : now make a thread entry (pns now points at the correct NUMA node entry)
+ if( pns == NULL )
+ pts = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_threadset_per_thread_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ else
+ pts = libshared_memory_alloc_from_specific_node( ms, pns->numa_node_id, sizeof(struct libbenchmark_threadset_per_thread_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ pts->thread_ready_flag = LOWERED;
+ pts->threadset_start_flag = &tsets->threadset_start_flag;
+ pts->tns_lp = tns;
+ pts->numa_node_state = pns; // TRD : pns is NULL on SMP
+ pts->threadset_state = tsets;
+ pts->users_per_thread_state = NULL;
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( pts->lasue, pts );
+ lfds710_list_asu_insert_at_start( &tsets->list_of_per_thread_states, &pts->lasue );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_threadset_run( struct libbenchmark_threadset_state *tsets )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libbenchmark_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ pts->pti.logical_processor_number = LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER( *pts->tns_lp );
+ pts->pti.windows_processor_group_number = LIBBENCHMARK_TOPOLOGY_NODE_GET_WINDOWS_GROUP_NUMBER( *pts->tns_lp );
+ pts->pti.thread_function = tsets->thread_function;
+ pts->pti.thread_argument = pts;
+
+ libshared_pal_thread_start( &pts->thread_handle, &pts->pti );
+ }
+
+ tsets->threadset_start_flag = RAISED;
+
+ LFDS710_PAL_ASSERT( tsets != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(tsets->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libshared_pal_thread_wait( pts->thread_handle );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_threadset_thread_ready_and_wait( struct libbenchmark_threadset_per_thread_state *pts )
+{
+ LFDS710_PAL_ASSERT( pts != NULL );
+
+ pts->thread_ready_flag = RAISED;
+
+ LFDS710_MISC_BARRIER_FULL;
+
+ while( *pts->threadset_start_flag == LOWERED )
+ LFDS710_MISC_BARRIER_LOAD;
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_cleanup( struct libbenchmark_topology_state *ts )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+
+ lfds710_btree_au_cleanup( &ts->topology_tree, NULL );
+
+ lfds710_btree_au_cleanup( &ts->lp_printing_offset_lookup_tree, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_compare_lp_printing_offsets_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr;
+
+ struct libbenchmark_topology_lp_printing_offset
+ *tlpo_one,
+ *tlpo_two;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ tlpo_one = (struct libbenchmark_topology_lp_printing_offset *) new_key;
+ tlpo_two = (struct libbenchmark_topology_lp_printing_offset *) existing_key;
+
+ cr = libbenchmark_topology_node_compare_nodes_function( &tlpo_one->tns, &tlpo_two->tns );
+
+ return cr;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_compare_node_against_lp_printing_offset_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ struct libbenchmark_topology_lp_printing_offset
+ *tlpo;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ tns = (struct libbenchmark_topology_node_state *) new_key;
+ tlpo = (struct libbenchmark_topology_lp_printing_offset *) existing_key;
+
+ cr = libbenchmark_topology_node_compare_nodes_function( tns, &tlpo->tns );
+
+ return cr;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_init( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms )
+{
+ int
+ offset = 0,
+ rv;
+
+ lfds710_pal_uint_t
+ lp_count;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct libbenchmark_topology_lp_printing_offset
+ *tlpo;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ lfds710_btree_au_init_valid_on_current_logical_core( &ts->topology_tree, libbenchmark_topology_node_compare_nodes_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ rv = libbenchmark_porting_abstraction_layer_populate_topology( ts, ms );
+
+ lfds710_btree_au_get_by_absolute_position( &ts->topology_tree, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE );
+ lp_count = count_of_logical_processors_below_node( baue );
+ ts->line_width = (int) ( lp_count * 3 + lp_count - 1 );
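+  // presumably three characters per logical processor column plus a one character separator between columns, i.e. 4 * lp_count - 1, matching the "offset += 4" below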
+
+ // TRD : now form up the printing offset tree
+ lfds710_btree_au_init_valid_on_current_logical_core( &ts->lp_printing_offset_lookup_tree, libbenchmark_topology_compare_lp_printing_offsets_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ baue = NULL;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ tlpo = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_lp_printing_offset), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ tlpo->tns = *tns;
+ tlpo->offset = offset;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( tlpo->baue, tlpo );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( tlpo->baue, tlpo );
+
+ lfds710_btree_au_insert( &ts->lp_printing_offset_lookup_tree, &tlpo->baue, NULL );
+
+ offset += 4;
+ }
+ }
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_insert( struct libbenchmark_topology_state *ts, struct libbenchmark_topology_node_state *tns )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( tns->baue, tns );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( tns->baue, tns );
+ lfds710_btree_au_insert( &ts->topology_tree, &tns->baue, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** structs *****/
+struct libbenchmark_topology_lp_printing_offset
+{
+ int
+ offset;
+
+ struct lfds710_btree_au_element
+ baue;
+
+ struct libbenchmark_topology_node_state
+ tns;
+};
+
+/***** private prototypes *****/
+lfds710_pal_uint_t count_of_logical_processors_below_node( struct lfds710_btree_au_element *baue );
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_iterate_init( struct libbenchmark_topology_iterate_state *tis, enum libbenchmark_topology_node_type type )
+{
+ LFDS710_PAL_ASSERT( tis != NULL );
+ // TRD : type can be any value in its range
+
+ tis->baue = NULL;
+ tis->type = type;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_iterate( struct libbenchmark_topology_state *ts, struct libbenchmark_topology_iterate_state *tis, struct libbenchmark_topology_node_state **tns )
+{
+ int
+ rv = 1;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( tis != NULL );
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &tis->baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ *tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *tis->baue );
+
+ if( (*tns)->type == tis->type )
+ break;
+ }
+
+ if( tis->baue == NULL )
+ {
+ *tns = NULL;
+ rv = 0;
+ }
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+/***** private prototypes *****/
+static void libbenchmark_topology_internal_generate_thread_set_one_lp_per_lowest_level_cache( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets );
+static void libbenchmark_topology_internal_generate_thread_set_one_to_all_lps_per_lowest_level_cache( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets );
+static void libbenchmark_topology_internal_generate_thread_set_all_lps_per_lowest_level_cache( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets );
+static void libbenchmark_topology_internal_generate_thread_set_all_lps( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets );
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_generate_deduplicated_logical_processor_sets( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets )
+{
+ int
+ cr;
+
+ struct lfds710_list_asu_element
+ *local_lasue = NULL,
+ *lasue;
+
+ struct lfds710_list_asu_state
+ throw_lp_sets,
+ local_lp_sets;
+
+ struct libbenchmark_topology_logical_processor_set
+ *local_lps,
+ *lps,
+ *new_lps;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( lp_sets != NULL );
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &throw_lp_sets, NULL );
+ lfds710_list_asu_init_valid_on_current_logical_core( &local_lp_sets, NULL );
+
+ // TRD : order is a useful hack - we want the full set to come last after deduplication, this order achieves this
+ libbenchmark_topology_internal_generate_thread_set_one_lp_per_lowest_level_cache( ts, ms, &local_lp_sets );
+ libbenchmark_topology_internal_generate_thread_set_all_lps_per_lowest_level_cache( ts, ms, &local_lp_sets );
+ libbenchmark_topology_internal_generate_thread_set_one_to_all_lps_per_lowest_level_cache( ts, ms, &local_lp_sets );
+ libbenchmark_topology_internal_generate_thread_set_all_lps( ts, ms, &throw_lp_sets );
+
+ /*
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(local_lp_sets, local_lasue) )
+ {
+ char
+ *lps_string;
+
+ local_lps = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *local_lasue );
+
+ benchmark_topology_construct_benchmark_logical_processor_set_string( t, local_lps, &lps_string );
+ printf( "%s\n", lps_string );
+ }
+
+ exit( 1 );
+
+ */
+
+ /* TRD : now de-duplicate local_lp_sets
+ dumbo algorithm, loop over every value in local_lp_sets
+ and if not present in lp_sets, add to lp_sets
+
+ algorithmically better to sort and then pass over once, removing duplicates
+ however, this is not a coding test - it's real life - and in this case
+ the extra coding work makes no sense at all
+ */
+
+ lfds710_list_asu_init_valid_on_current_logical_core( lp_sets, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(local_lp_sets, local_lasue) )
+ {
+ local_lps = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *local_lasue );
+
+ cr = !0;
+ lasue = NULL;
+
+ // TRD : exit loop if cr is 0, which means we found the set exists already
+ while( cr != 0 and LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*lp_sets, lasue) )
+ {
+ lps = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ cr = libbenchmark_topology_node_compare_lpsets_function( &local_lps->logical_processors, &lps->logical_processors );
+ }
+
+ // TRD : if we did NOT find this set already, then keep it
+ if( cr != 0 )
+ {
+ new_lps = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_logical_processor_set), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ new_lps->logical_processors = local_lps->logical_processors;
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( new_lps->lasue, new_lps );
+ lfds710_list_asu_insert_at_end( lp_sets, &new_lps->lasue );
+ }
+ }
+
+ lfds710_list_asu_cleanup( &local_lp_sets, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void libbenchmark_topology_internal_generate_thread_set_one_lp_per_lowest_level_cache( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets )
+{
+ lfds710_pal_uint_t
+ loop,
+ lowest_level_type_count = 0,
+ lps_count;
+
+ struct lfds710_btree_au_element
+ *be = NULL,
+ *be_llc = NULL,
+ *be_lp = NULL;
+
+ struct libbenchmark_topology_logical_processor_set
+ *lps;
+
+ struct libbenchmark_topology_node_state
+ *new_tns,
+ *node = NULL,
+ *node_llc = NULL,
+ *node_lp = NULL;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( lp_sets != NULL );
+
+ /* TRD : we find the lowest level cache type
+ there are a certain number of these caches in the system
+ each will service a certain number of logical processors
+
+ we create one thread set per lowest level cache type,
+ with the the first thread set having only the first lowest
+ level cache and following sets adding one more lowest level
+ cache at a time, until the final thread set has all the
+ lowest level caches; for each lowest level cache in a thread set,
+ that thread set has the first logical processor of each lowest
+ level cache
+ */
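+
+ /* TRD : purely illustrative - on a hypothetical system with four lowest level
+ caches, the generated sets would be { one LP from LLC 0 }, { one LP each from
+ LLCs 0-1 }, { one LP each from LLCs 0-2 } and { one LP each from LLCs 0-3 }
+ */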
+
+ // TRD : find lowest level memory, bus or cache
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be_llc, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_llc = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_llc );
+
+ if( node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SYSTEM or node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA or node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE )
+ break;
+ }
+
+ // TRD : count the number of the lowest level type
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ if( 0 == libbenchmark_topology_node_compare_node_types_function(node, node_llc) )
+ lowest_level_type_count++;
+ }
+
+ // TRD : create the thread sets
+ for( loop = 0 ; loop < lowest_level_type_count ; loop++ )
+ {
+ /* TRD : find the first 0 to (loop+1) lowest level types
+ add the smallest LP under that type to the thread set
+ */
+
+ lps = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_logical_processor_set), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_init_valid_on_current_logical_core( &lps->logical_processors, libbenchmark_topology_node_compare_nodes_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+ lps_count = 0;
+ be = NULL;
+
+ while( lps_count < loop+1 and lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ // TRD : if we've found a lowest level type...
+ if( 0 == libbenchmark_topology_node_compare_node_types_function(node, node_llc) )
+ {
+ // TRD : now use a temp copy of be and go LARGEST_TO_SMALLEST until we find an LP
+ be_lp = be;
+
+ while( lfds710_btree_au_get_by_relative_position(&be_lp, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_lp = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_lp );
+
+ if( node_lp->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ break;
+ }
+
+ // TRD : now add LP
+ new_tns = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_node_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ *new_tns = *node_lp;
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( new_tns->lasoe, new_tns );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( new_tns->lasoe, new_tns );
+ lfds710_list_aso_insert( &lps->logical_processors, &new_tns->lasoe, NULL );
+ lps_count++;
+ }
+ }
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( lps->lasue, lps );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lps->lasue, lps );
+ lfds710_list_asu_insert_at_end( lp_sets, &lps->lasue );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void libbenchmark_topology_internal_generate_thread_set_one_to_all_lps_per_lowest_level_cache( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets )
+{
+ lfds710_pal_uint_t
+ loop,
+ lowest_level_type_count = 0,
+ lp_per_llc = 0,
+ lp_count,
+ lps_count;
+
+ struct lfds710_btree_au_element
+ *be = NULL,
+ *be_llc = NULL,
+ *be_lp = NULL;
+
+ struct libbenchmark_topology_logical_processor_set
+ *lps;
+
+ struct libbenchmark_topology_node_state
+ *new_lp,
+ *node = NULL,
+ *node_llc = NULL,
+ *node_lp = NULL;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( lp_sets != NULL );
+
+ /* TRD : we find the lowest level cache type
+ there are a certain number of these caches in the system
+ each will service a certain number of logical processors
+
+ we create one thread set per logical processor under
+ the lowest level type (they will all have the same
+ number of logical processors)
+
+ each set contains an increasing number of LPs from each
+ lowest level type, e.g. the first set has one LP from
+ each lowest level type, the second has two, etc, until
+ all LPs are in use
+ */
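+
+ /* TRD : purely illustrative - on a hypothetical system with four lowest level
+ caches, each servicing two LPs, two sets are generated; the first has one LP
+ from each LLC (four LPs), the second has both LPs from each LLC (eight LPs)
+ */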
+
+ // TRD : find lowest level memory, bus or cache
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be_llc, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_llc = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_llc );
+
+ if( node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SYSTEM or node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA or node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE )
+ break;
+ }
+
+ /* TRD : count the number of LPs under the lowest level type
+ since be_llc points at the smallest of the lowest level types
+ once we've counted all its LPs we will naturally exit the tree
+ since we're walking from largest to smallest (LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE)
+ */
+ be_lp = be_llc;
+
+ while( lfds710_btree_au_get_by_relative_position(&be_lp, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_lp = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_lp );
+
+ if( node_lp->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ lp_per_llc++;
+ }
+
+ // TRD : count the number of the lowest level type
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ if( 0 == libbenchmark_topology_node_compare_node_types_function(node, node_llc) )
+ lowest_level_type_count++;
+ }
+
+ // TRD : create the thread sets
+ for( loop = 0 ; loop < lp_per_llc ; loop++ )
+ {
+ /* TRD : visit each lowest level type
+ add from 0 to loop+1 of its LPs to the set
+ */
+
+ lps = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_logical_processor_set), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_init_valid_on_current_logical_core( &lps->logical_processors, libbenchmark_topology_node_compare_nodes_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+ lps_count = 0;
+
+ be = NULL;
+
+ while( lps_count < lowest_level_type_count*(loop+1) and lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ // TRD : if we've found a lowest level type...
+ if( 0 == libbenchmark_topology_node_compare_node_types_function(node, node_llc) )
+ {
+ // TRD : now use a temp copy of be and go LARGEST_TO_SMALLEST until we have (loop+1) LPs
+ be_lp = be;
+ lp_count = 0;
+
+ while( lp_count < loop+1 and lfds710_btree_au_get_by_relative_position(&be_lp, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_lp = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_lp );
+
+ if( node_lp->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ new_lp = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_node_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ *new_lp = *node_lp;
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( new_lp->lasoe, new_lp );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( new_lp->lasoe, new_lp );
+ lfds710_list_aso_insert( &lps->logical_processors, &new_lp->lasoe, NULL );
+ lp_count++;
+ lps_count++;
+ }
+ }
+ }
+ }
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( lps->lasue, lps );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lps->lasue, lps );
+ lfds710_list_asu_insert_at_end( lp_sets, &lps->lasue );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void libbenchmark_topology_internal_generate_thread_set_all_lps_per_lowest_level_cache( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets )
+{
+ lfds710_pal_uint_t
+ loop,
+ lowest_level_type_count = 0,
+ lp_per_llc = 0,
+ lps_count;
+
+ struct lfds710_btree_au_element
+ *be = NULL,
+ *be_llc = NULL,
+ *be_lp = NULL;
+
+ struct libbenchmark_topology_logical_processor_set
+ *lps;
+
+ struct libbenchmark_topology_node_state
+ *new_lp,
+ *node = NULL,
+ *node_llc = NULL,
+ *node_lp = NULL;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( lp_sets != NULL );
+
+ /* TRD : we find the lowest level cache type
+ there are a certain number of these caches in the system
+ each will service a certain number of logical processors
+
+ each lowest level type has a given number of logical processors
+ (the number is the same for each lowest level type)
+ we create one thread set per lowest level type,
+ where we're adding in full blocks of lowest level type LPs,
+ e.g. the first set has all the LPs of the first lowest level
+ type, the second set has all the LPs of the first and second
+ lowest level types, etc
+ */
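+
+ /* TRD : purely illustrative - on a hypothetical system with four lowest level
+ caches, each servicing two LPs, the generated sets hold two, four, six and
+ eight LPs respectively - all LPs of LLC 0, then of LLCs 0-1, 0-2 and 0-3
+ */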
+
+ // TRD : find lowest level memory, bus or cache
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be_llc, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_llc = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_llc );
+
+ if( node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SYSTEM or node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA or node_llc->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE )
+ break;
+ }
+
+ /* TRD : count the number of LPs under the lowest level type
+ since be_llc points at the smallest of the lowest level types
+ once we've counted all its LPs we will naturally exit the tree
+ since we're walking from largest to smallest (LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE)
+ */
+ be_lp = be_llc;
+
+ while( lfds710_btree_au_get_by_relative_position(&be_lp, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_lp = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_lp );
+
+ if( node_lp->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ lp_per_llc++;
+ }
+
+ // TRD : count the number of the lowest level type
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ if( 0 == libbenchmark_topology_node_compare_node_types_function(node, node_llc) )
+ lowest_level_type_count++;
+ }
+
+ // TRD : create the thread sets
+ for( loop = 0 ; loop < lowest_level_type_count ; loop++ )
+ {
+ /* TRD : find the first 0 to (loop+1) lowest level types
+ add all LPs under those lowest level types to the set
+ */
+
+ lps = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_logical_processor_set), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_init_valid_on_current_logical_core( &lps->logical_processors, libbenchmark_topology_node_compare_nodes_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+ lps_count = 0;
+ be = NULL;
+
+ while( lps_count < lp_per_llc*(loop+1) and lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ // TRD : if we've found a lowest level type...
+ if( 0 == libbenchmark_topology_node_compare_node_types_function(node, node_llc) )
+ {
+ // TRD : now use a temp copy of be and go LARGEST_TO_SMALLEST until we exit the tree or find another lowest level type
+ be_lp = be;
+
+ while( lfds710_btree_au_get_by_relative_position(&be_lp, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node_lp = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be_lp );
+
+ if( node_lp->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ // TRD : now add LP
+ new_lp = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_node_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ *new_lp = *node_lp;
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( new_lp->lasoe, new_lp );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( new_lp->lasoe, new_lp );
+ lfds710_list_aso_insert( &lps->logical_processors, &new_lp->lasoe, NULL );
+ lps_count++;
+ }
+
+ if( node_lp->type == node_llc->type )
+ break;
+ }
+ }
+ }
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( lps->lasue, lps );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lps->lasue, lps );
+ lfds710_list_asu_insert_at_end( lp_sets, &lps->lasue );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void libbenchmark_topology_internal_generate_thread_set_all_lps( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_asu_state *lp_sets )
+{
+ struct lfds710_btree_au_element
+ *be = NULL;
+
+ struct libbenchmark_topology_logical_processor_set
+ *lps;
+
+ struct libbenchmark_topology_node_state
+ *new_lp,
+ *node;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( lp_sets != NULL );
+
+ lps = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_logical_processor_set), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_list_aso_init_valid_on_current_logical_core( &lps->logical_processors, libbenchmark_topology_node_compare_nodes_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+
+ // TRD : iterate over tree - add in every logical processor
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &be, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ node = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *be );
+
+ if( node->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ // TRD : now add LP
+ new_lp = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_node_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ *new_lp = *node;
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( new_lp->lasoe, new_lp );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( new_lp->lasoe, new_lp );
+ lfds710_list_aso_insert( &lps->logical_processors, &new_lp->lasoe, NULL );
+ }
+ }
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( lps->lasue, lps );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lps->lasue, lps );
+ lfds710_list_asu_insert_at_end( lp_sets, &lps->lasue );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_generate_numa_modes_list( struct libbenchmark_topology_state *ts, enum libbenchmark_topology_numa_mode numa_mode, struct libshared_memory_state *ms, struct lfds710_list_asu_state *numa_modes_list )
+{
+ lfds710_pal_uint_t
+ numa_node_count;
+
+ struct libbenchmark_topology_numa_node
+ *tnn;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP or numa_mode == LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( numa_modes_list != NULL );
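+
+ /* TRD : SMP mode always yields a single list entry
+ NUMA mode yields one entry on a single-node system and, on a multi-node
+ system, two entries - NUMA and NUMA_BUT_NOT_USED - so callers can exercise
+ the benchmarks both with and without NUMA in use
+ */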
+
+ lfds710_list_asu_init_valid_on_current_logical_core( numa_modes_list, NULL );
+
+ switch( numa_mode )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP:
+ tnn = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_numa_node), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ tnn->mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_SMP;
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( tnn->lasue, tnn );
+ lfds710_list_asu_insert_at_start( numa_modes_list, &tnn->lasue );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA:
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA, &numa_node_count );
+
+ if( numa_node_count == 1 )
+ {
+ tnn = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_numa_node), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ tnn->mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA;
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( tnn->lasue, tnn );
+ lfds710_list_asu_insert_at_start( numa_modes_list, &tnn->lasue );
+ }
+
+ if( numa_node_count >= 2 )
+ {
+ tnn = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_numa_node), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ tnn->mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA;
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( tnn->lasue, tnn );
+ lfds710_list_asu_insert_at_start( numa_modes_list, &tnn->lasue );
+
+ tnn = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libbenchmark_topology_numa_node), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ tnn->mode = LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED;
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( tnn->lasue, tnn );
+ lfds710_list_asu_insert_at_start( numa_modes_list, &tnn->lasue );
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NUMA_MODE_NUMA_BUT_NOT_USED:
+ // TRD : not used
+ break;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_query( struct libbenchmark_topology_state *ts, enum libbenchmark_topology_query query_type, void *query_input, void *query_output )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+
+ // TRD : query type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+ // TRD : query_output can be NULL in some cases
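+
+ /* TRD : illustrative usage, as per libbenchmark_topology_generate_numa_modes_list :
+
+ lfds710_pal_uint_t
+ numa_node_count;
+
+ libbenchmark_topology_query( ts, LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE, (void *) (lfds710_pal_uint_t) LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA, &numa_node_count );
+ */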
+
+ switch( query_type )
+ {
+ case LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMBER_OF_NODE_TYPE:
+ {
+ enum libbenchmark_topology_node_type
+ type;
+
+ lfds710_pal_uint_t
+ *count;
+
+ struct lfds710_btree_au_element
+ *baue = NULL;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ // TRD : query_input is an enum and so can be 0
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ type = (enum libbenchmark_topology_node_type) (lfds710_pal_uint_t) query_input;
+ count = (lfds710_pal_uint_t *) query_output;
+
+ *count = 0;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+
+ if( tns->type == type )
+ (*count)++;
+ }
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_QUERY_GET_NUMA_NODE_FOR_LOGICAL_PROCESSOR:
+ {
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct libbenchmark_topology_node_state
+ *tns_lp,
+ *tns = NULL;
+
+ LFDS710_PAL_ASSERT( query_input != NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(struct libbenchmark_topology_node_state **) query_output = NULL;
+
+ tns_lp = (struct libbenchmark_topology_node_state *) query_input;
+
+ // TRD : find the LP, then climb the tree to the first larger NUMA node
+
+ lfds710_btree_au_get_by_key( &ts->topology_tree, NULL, tns_lp, &baue );
+
+ while( lfds710_btree_au_get_by_relative_position(&baue,LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA )
+ break;
+ }
+
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA )
+ *(struct libbenchmark_topology_node_state **) query_output = tns;
+ }
+ break;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_internal.h"
+
+/***** defines *****/
+#define NUMBER_KEY_LINES 8
+
+/***** structs *****/
+struct line
+{
+ char
+ *string;
+
+ enum libbenchmark_topology_node_type
+ type;
+
+ struct lfds710_list_asu_element
+ lasue;
+
+ union libbenchmark_topology_node_extended_info
+ extended_node_info;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void strcat_spaces( char *string, lfds710_pal_uint_t number_width );
+
+
+
+
+
+/****************************************************************************/
+char *libbenchmark_topology_generate_string( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, enum libbenchmark_topology_string_format format )
+{
+ char const
+ cache_type_enum_to_string_lookup[LIBBENCHMARK_TOPOLOGY_NODE_CACHE_TYPE_COUNT] =
+ {
+ 'D', 'I', 'U'
+ },
+ *const key_strings[NUMBER_KEY_LINES] =
+ {
+ "R = Notional system root ",
+ "N = NUMA node ",
+ "S = Socket (physical package)",
+ "LnU = Level n unified cache ",
+ "LnD = Level n data cache ",
+ "LnI = Level n instruction cache",
+ "P = Physical core ",
+ "nnn = Logical core ",
+ },
+ *const empty_key_string = " ";
+
+ char
+ *topology_string;
+
+ int
+ final_length = 0,
+ half_space_width,
+ loop,
+ space_width;
+
+ lfds710_pal_uint_t
+ key_line,
+ lp_count,
+ number_topology_lines,
+ number_key_and_topology_lines;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct line
+ *line;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_list_asu_state
+ lines;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : format can be any value in its range
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &lines, NULL );
+
+ baue = NULL;
+
+ while( lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&ts->topology_tree, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_LARGEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ /* TRD : look for this node type in the list of lines
+ if it's not there, add it to the end
+ if it is there, use it
+ */
+
+ if( 0 == lfds710_list_asu_get_by_key(&lines, key_compare_function, tns, &lasue) )
+ {
+ line = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct line), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ line->type = tns->type;
+ line->extended_node_info = tns->extended_node_info;
+ // TRD : +2 for trailing space and for trailing NULL
+ line->string = libshared_memory_alloc_from_most_free_space_node( ms, ts->line_width+2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ *line->string = '\0';
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( line->lasue, line );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( line->lasue, line );
+ lfds710_list_asu_insert_at_end( &lines, &line->lasue );
+ }
+ else
+ line = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ switch( tns->type )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR:
+ // libshared_ansi_strcat( line->string, " L " );
+ libshared_ansi_strcat_number_with_leading_zeros( line->string, LIBBENCHMARK_TOPOLOGY_NODE_GET_LOGICAL_PROCESSOR_NUMBER(*tns), 3 );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE:
+ {
+ lp_count = count_of_logical_processors_below_node( baue );
+ space_width = (int) ( lp_count * 3 + lp_count - 3 );
+ half_space_width = space_width / 2;
+
+ strcat_spaces( line->string, half_space_width );
+ libshared_ansi_strcat( line->string, "L" );
+ libshared_ansi_strcat_number( line->string, tns->extended_node_info.cache.level );
+ libshared_ansi_strcat_char( line->string, cache_type_enum_to_string_lookup[tns->extended_node_info.cache.type] );
+ strcat_spaces( line->string, half_space_width );
+ }
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_TYPE_PHYSICAL_PROCESSOR:
+ lp_count = count_of_logical_processors_below_node( baue );
+ space_width = (int) ( lp_count * 3 + lp_count - 1 );
+ half_space_width = space_width / 2;
+ strcat_spaces( line->string, half_space_width );
+ libshared_ansi_strcat( line->string, "P" );
+ strcat_spaces( line->string, half_space_width );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SOCKET:
+ lp_count = count_of_logical_processors_below_node( baue );
+ space_width = (int) ( lp_count * 3 + lp_count - 1 );
+ half_space_width = space_width / 2;
+ strcat_spaces( line->string, half_space_width );
+ libshared_ansi_strcat( line->string, "S" );
+ strcat_spaces( line->string, half_space_width );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_TYPE_NUMA:
+ lp_count = count_of_logical_processors_below_node( baue );
+ space_width = (int) ( lp_count * 3 + lp_count - 1 );
+ half_space_width = space_width / 2;
+ strcat_spaces( line->string, half_space_width );
+ libshared_ansi_strcat( line->string, "N" );
+ strcat_spaces( line->string, half_space_width );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_TYPE_SYSTEM:
+ /* TRD : count the number of LPs below this node
+ assume for now each has caches, so each is three letters wide
+ compute the space_width and print R in the middle
+ */
+
+ lp_count = count_of_logical_processors_below_node( baue );
+ space_width = (int) ( lp_count * 3 + lp_count - 1 );
+ half_space_width = space_width / 2;
+ strcat_spaces( line->string, half_space_width );
+ libshared_ansi_strcat( line->string, "R" );
+ strcat_spaces( line->string, half_space_width );
+ break;
+ }
+
+ libshared_ansi_strcat( line->string, " " );
+ }
+
+ /* TRD : so, we have the topology
+ but we also want to print the key on the same lines
+ the topology may have more or less lines than the key
+ if we run out of topology lines, we need to print white spaces to keep the justification correct
+ same for running out of key lines
+
+ first we compute the length of the final string, so we can allocate the topology string
+ then we actually form up the string
+ */
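+
+ /* TRD : so each output line is of the form (illustrative only) :
+
+ <topology line, ts->line_width chars> <key line, 31 chars><newline>
+
+ where the newline is "\n" for stdout and "\\n\\" plus a real newline for gnuplot
+ */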
+
+ lfds710_list_asu_query( &lines, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_topology_lines );
+
+ if( number_topology_lines < NUMBER_KEY_LINES )
+ number_key_and_topology_lines = NUMBER_KEY_LINES;
+ else
+ number_key_and_topology_lines = number_topology_lines;
+
+ // TRD : +1 for one space, +31 for the text, +1 for final newline, +5 for gnuplot stuff ("\\n\\", which we won't need if stdout)
+ final_length = (int) ( (ts->line_width + 39) * number_key_and_topology_lines );
+
+ // TRD : and a trailing NULL
+ topology_string = libshared_memory_alloc_from_most_free_space_node( ms, final_length+1, 1 );
+ *topology_string = '\0';
+
+ // TRD : now all the fun of the faire - time to compose the string
+
+ key_line = 0;
+
+ lasue = LFDS710_LIST_ASU_GET_START( lines );
+
+ while( lasue != NULL or key_line < NUMBER_KEY_LINES )
+ {
+ // TRD : copy in a blank topology line
+ if( lasue == NULL )
+ for( loop = 0 ; loop < ts->line_width+1 ; loop++ )
+ libshared_ansi_strcat( topology_string, " " );
+
+ if( lasue != NULL )
+ {
+ line = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libshared_ansi_strcat( topology_string, line->string );
+ lasue = LFDS710_LIST_ASU_GET_NEXT( *lasue );
+ }
+
+ // TRD : copy in a blank key line
+ if( key_line == NUMBER_KEY_LINES )
+ libshared_ansi_strcat( topology_string, empty_key_string );
+
+ if( key_line < NUMBER_KEY_LINES )
+ libshared_ansi_strcat( topology_string, key_strings[key_line++] );
+
+ switch( format )
+ {
+ case LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_STDOUT:
+ libshared_ansi_strcat( topology_string, "\n" );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_STRING_FORMAT_GNUPLOT:
+ libshared_ansi_strcat( topology_string, "\\n\\\n" );
+ break;
+ }
+ }
+
+ return topology_string;
+}
+
+
+
+
+
+/****************************************************************************/
+char *libbenchmark_topology_generate_lpset_string( struct libbenchmark_topology_state *ts, struct libshared_memory_state *ms, struct lfds710_list_aso_state *lpset )
+{
+ char
+ *lpset_string = NULL;
+
+ int
+ loop;
+
+ struct libbenchmark_topology_lp_printing_offset
+ *tlpo;
+
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ struct lfds710_btree_au_element
+ *baue;
+
+ struct lfds710_list_aso_element
+ *lasoe = NULL;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( lpset != NULL );
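+
+ /* TRD : the returned string is one character per column of the topology
+ diagram; each LP present in lpset is marked with a '1' at its printing
+ offset, every other column is left as a space
+ */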
+
+ lpset_string = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(char) * (ts->line_width+1), sizeof(char) );
+
+ for( loop = 0 ; loop < ts->line_width ; loop++ )
+ lpset_string[loop] = ' ';
+ lpset_string[loop] = '\0';
+
+ while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(*lpset,lasoe) )
+ {
+ tns = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ lfds710_btree_au_get_by_key( &ts->lp_printing_offset_lookup_tree, libbenchmark_topology_compare_node_against_lp_printing_offset_function, tns, &baue );
+ tlpo = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *baue );
+
+ lpset_string[tlpo->offset+1] = '1';
+ }
+
+ return lpset_string;
+}
+
+
+
+
+
+/****************************************************************************/
+lfds710_pal_uint_t count_of_logical_processors_below_node( struct lfds710_btree_au_element *baue )
+{
+ lfds710_pal_uint_t
+ lp_count = 0;
+
+ struct libbenchmark_topology_node_state
+ *root_node,
+ *tns;
+
+ LFDS710_PAL_ASSERT( baue != NULL );
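+
+ /* TRD : walk from baue towards smaller elements, counting logical processors,
+ and stop on meeting another node of the same type (and, for caches, the
+ same level and cache type), i.e. the next sibling of the original node
+ */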
+
+ tns = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+ root_node = tns;
+
+ while( lfds710_btree_au_get_by_relative_position(&baue, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_SMALLER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ tns = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ if( tns->type == root_node->type and tns->type != LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE )
+ break;
+
+ if( tns->type == root_node->type and tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE and tns->extended_node_info.cache.type == root_node->extended_node_info.cache.type and tns->extended_node_info.cache.level == root_node->extended_node_info.cache.level )
+ break;
+
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ lp_count++;
+ }
+
+ return lp_count;
+}
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ struct libbenchmark_topology_node_state
+ *tns;
+
+ struct line
+ *line;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ tns = (struct libbenchmark_topology_node_state *) new_key;
+ line = (struct line *) existing_key;
+
+ if( tns->type > line->type )
+ return 1;
+
+ if( tns->type < line->type )
+ return -1;
+
+ if( tns->type == line->type )
+ if( tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE and line->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE )
+ {
+ if( tns->extended_node_info.cache.level > line->extended_node_info.cache.level )
+ return 1;
+
+ if( tns->extended_node_info.cache.level < line->extended_node_info.cache.level )
+ return -1;
+
+ if( tns->extended_node_info.cache.level == line->extended_node_info.cache.level )
+ {
+ if( tns->extended_node_info.cache.type > line->extended_node_info.cache.type )
+ return 1;
+
+ if( tns->extended_node_info.cache.type < line->extended_node_info.cache.type )
+ return -1;
+
+ if( tns->extended_node_info.cache.type == line->extended_node_info.cache.type )
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+
+
+
+
+/****************************************************************************/
+static void strcat_spaces( char *string, lfds710_pal_uint_t number_width )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( string != NULL );
+ // TRD : number_width can be any value in its range
+
+ for( loop = 0 ; loop < number_width ; loop++ )
+ libshared_ansi_strcat( string, " " );
+
+ return;
+}
+
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_node_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_node_cleanup( struct libbenchmark_topology_node_state *tns, void (*element_cleanup_callback)(struct lfds710_list_aso_state *lasos, struct lfds710_list_aso_element *lasoe) )
+{
+ LFDS710_PAL_ASSERT( tns != NULL );
+ // TRD : element_cleanup_callback can be NULL
+
+ lfds710_list_aso_cleanup( &tns->logical_processor_children, element_cleanup_callback );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_node_internal.h"
+
+/***** enums *****/
+enum libbenchmark_topology_node_set_type
+{
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUBSET,
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SET,
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUPERSET,
+ LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_NOT_A_SET
+};
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_node_compare_nodes_function( void const *new_key, void const *existing_key )
+{
+ enum flag
+ finished_flag = LOWERED,
+ not_a_set_flag = LOWERED;
+
+ enum libbenchmark_topology_node_set_type
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_NOT_A_SET;
+
+ int
+ cr = 0,
+ not_a_set_compare = 0, // TRD : remove compiler warning
+ rv = 0;
+
+ lfds710_pal_uint_t
+ lps_one_count = 0,
+ lps_two_count = 0,
+ shared_count = 0;
+
+ struct lfds710_list_aso_element
+ *lasoe_one,
+ *lasoe_two;
+
+ struct libbenchmark_topology_node_state
+ *new_tns,
+ *existing_tns,
+ *tns_one,
+ *tns_two;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ /* TRD : we compare the range of logical processors serviced by the nodes
+ the basic rule for cache/bus/memory arrangement is that the number of logical processors supported by a node
+ remains the same width or gets wider, as we go up the tree
+
+ so, first, we compare the logical processor list of each node
+
+ if the set in new_node is a subset of existing_node, then we are less than
+ if the set in new_node is the set of existing_node, then we compare on type (we know the ordering of types)
+ if the set in new_node is a superset of existing_node, then we are greater than
+ if the set in new_node is not a subset, set or superset of existing_node then we scan the logical processor
+ numbers and the first of new_node or existing_node to -have- the lowest number the other does -not-, is less than
+ */
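+
+ /* TRD : purely illustrative, with nodes named by the LPs they service :
+ new {0,1} vs existing {0,1,2,3} is a subset, so new compares as less than
+ new {0,1,2,3} vs existing {0,1} is a superset, so new compares as greater than
+ new {0,1} vs existing {0,1} is the same set, so the node types decide
+ new {0,1} vs existing {2,3} is not a set; new holds LP 0, which existing
+ does not, so new compares as less than
+ */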
+
+ new_tns = (struct libbenchmark_topology_node_state *) new_key;
+ existing_tns = (struct libbenchmark_topology_node_state *) existing_key;
+
+ if( new_tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR and existing_tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ if( new_tns->extended_node_info.logical_processor.windows_group_number == existing_tns->extended_node_info.logical_processor.windows_group_number )
+ {
+ if( new_tns->extended_node_info.logical_processor.number > existing_tns->extended_node_info.logical_processor.number )
+ cr = 1;
+
+ if( new_tns->extended_node_info.logical_processor.number < existing_tns->extended_node_info.logical_processor.number )
+ cr = -1;
+ }
+
+ if( new_tns->extended_node_info.logical_processor.windows_group_number < existing_tns->extended_node_info.logical_processor.windows_group_number )
+ cr = -1;
+
+ if( new_tns->extended_node_info.logical_processor.windows_group_number > existing_tns->extended_node_info.logical_processor.windows_group_number )
+ cr = 1;
+
+ if( cr == -1 )
+ not_a_set_compare = -1;
+
+ if( cr == 1 )
+ not_a_set_compare = 1;
+
+ if( cr == 0 )
+ shared_count = 1;
+
+ lps_one_count = lps_two_count = 1;
+ }
+
+ if( new_tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR and existing_tns->type != LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ tns_one = new_tns;
+
+ lasoe_two = LFDS710_LIST_ASO_GET_START( existing_tns->logical_processor_children );
+
+ while( lasoe_two != NULL and finished_flag == LOWERED )
+ {
+ tns_two = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_two );
+
+ cr = 0;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number == tns_two->extended_node_info.logical_processor.windows_group_number )
+ {
+ if( tns_one->extended_node_info.logical_processor.number > tns_two->extended_node_info.logical_processor.number )
+ cr = 1;
+
+ if( tns_one->extended_node_info.logical_processor.number < tns_two->extended_node_info.logical_processor.number )
+ cr = -1;
+ }
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number < tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = -1;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number > tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = 1;
+
+ if( cr == -1 )
+ {
+ finished_flag = RAISED;
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = -1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 1 )
+ {
+ lasoe_two = LFDS710_LIST_ASO_GET_NEXT( *lasoe_two );
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = 1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 0 )
+ {
+ shared_count++;
+ finished_flag = RAISED;
+ }
+ }
+
+ lps_one_count = 1;
+ lfds710_list_aso_query( &existing_tns->logical_processor_children, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &lps_two_count );
+ }
+
+ if( new_tns->type != LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR and existing_tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ tns_two = existing_tns;
+
+ lasoe_one = LFDS710_LIST_ASO_GET_START( new_tns->logical_processor_children );
+
+ while( lasoe_one != NULL and finished_flag == LOWERED )
+ {
+ tns_one = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_one );
+
+ cr = 0;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number == tns_two->extended_node_info.logical_processor.windows_group_number )
+ {
+ if( tns_one->extended_node_info.logical_processor.number > tns_two->extended_node_info.logical_processor.number )
+ cr = 1;
+
+ if( tns_one->extended_node_info.logical_processor.number < tns_two->extended_node_info.logical_processor.number )
+ cr = -1;
+ }
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number < tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = -1;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number > tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = 1;
+
+ if( cr == -1 )
+ {
+ lasoe_one = LFDS710_LIST_ASO_GET_NEXT( *lasoe_one );
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = -1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 1 )
+ {
+ finished_flag = RAISED;
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = 1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 0 )
+ {
+ shared_count++;
+ finished_flag = RAISED;
+ }
+ }
+
+ lfds710_list_aso_query( &new_tns->logical_processor_children, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &lps_one_count );
+ lps_two_count = 1;
+ }
+
+ if( new_tns->type != LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR and existing_tns->type != LIBBENCHMARK_TOPOLOGY_NODE_TYPE_LOGICAL_PROCESSOR )
+ {
+ // TRD : count the number of shared logical processors
+ lasoe_one = LFDS710_LIST_ASO_GET_START( new_tns->logical_processor_children );
+ lasoe_two = LFDS710_LIST_ASO_GET_START( existing_tns->logical_processor_children );
+
+ while( lasoe_one != NULL and lasoe_two != NULL )
+ {
+ tns_one = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_one );
+ tns_two = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_two );
+
+ cr = 0;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number == tns_two->extended_node_info.logical_processor.windows_group_number )
+ {
+ if( tns_one->extended_node_info.logical_processor.number > tns_two->extended_node_info.logical_processor.number )
+ cr = 1;
+
+ if( tns_one->extended_node_info.logical_processor.number < tns_two->extended_node_info.logical_processor.number )
+ cr = -1;
+ }
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number < tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = -1;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number > tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = 1;
+
+ if( cr == -1 )
+ {
+ lasoe_one = LFDS710_LIST_ASO_GET_NEXT( *lasoe_one );
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = -1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 1 )
+ {
+ lasoe_two = LFDS710_LIST_ASO_GET_NEXT( *lasoe_two );
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = 1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 0 )
+ {
+ shared_count++;
+ lasoe_one = LFDS710_LIST_ASO_GET_NEXT( *lasoe_one );
+ lasoe_two = LFDS710_LIST_ASO_GET_NEXT( *lasoe_two );
+ }
+ }
+
+ lfds710_list_aso_query( &new_tns->logical_processor_children, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &lps_one_count );
+ lfds710_list_aso_query( &existing_tns->logical_processor_children, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &lps_two_count );
+ }
+
+ // TRD : same number of logical processors, and they're fully shared
+ if( lps_one_count == lps_two_count and shared_count == lps_one_count )
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SET;
+
+ // TRD : smaller number of logical processors, but they're all shared
+ if( lps_one_count < lps_two_count and shared_count == lps_one_count )
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUBSET;
+
+ // TRD : larger number of logical processors, but lps_two is fully represented in lps_one
+ if( lps_one_count > lps_two_count and shared_count == lps_two_count )
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUPERSET;
+
+ // TRD : otherwise, we're LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_NOT_A_SET, which is the default value
+
+ switch( st )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUBSET:
+ rv = -1;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SET:
+ rv = libbenchmark_topology_node_compare_node_types_function( new_tns, existing_tns );
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUPERSET:
+ rv = 1;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_NOT_A_SET:
+ rv = not_a_set_compare;
+ break;
+ }
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_node_compare_node_types_function( void const *new_key, void const *existing_key )
+{
+ int
+ rv = 0;
+
+ struct libbenchmark_topology_node_state
+ *new_tns,
+ *existing_tns;
+
+ LFDS710_PAL_ASSERT( new_key != NULL );
+ LFDS710_PAL_ASSERT( existing_key != NULL );
+
+ new_tns = (struct libbenchmark_topology_node_state *) new_key;
+ existing_tns = (struct libbenchmark_topology_node_state *) existing_key;
+
+ if( new_tns->type < existing_tns->type )
+ return -1;
+
+ if( new_tns->type > existing_tns->type )
+ return 1;
+
+ if( new_tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE and existing_tns->type == LIBBENCHMARK_TOPOLOGY_NODE_TYPE_CACHE )
+ {
+ if( new_tns->extended_node_info.cache.level > existing_tns->extended_node_info.cache.level )
+ rv = 1;
+
+ if( new_tns->extended_node_info.cache.level < existing_tns->extended_node_info.cache.level )
+ rv = -1;
+
+ if( new_tns->extended_node_info.cache.level == existing_tns->extended_node_info.cache.level )
+ {
+ if( new_tns->extended_node_info.cache.type > existing_tns->extended_node_info.cache.type )
+ rv = 1;
+
+ if( new_tns->extended_node_info.cache.type < existing_tns->extended_node_info.cache.type )
+ rv = -1;
+ }
+ }
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+int libbenchmark_topology_node_compare_lpsets_function( struct lfds710_list_aso_state *lpset_one, struct lfds710_list_aso_state *lpset_two )
+{
+ enum flag
+ not_a_set_flag = LOWERED;
+
+ enum libbenchmark_topology_node_set_type
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_NOT_A_SET;
+
+ int
+ cr = 0,
+ rv = 0,
+ not_a_set_compare = 0;
+
+ lfds710_pal_uint_t
+ lps_one_count,
+ lps_two_count,
+ shared_count = 0;
+
+ struct lfds710_list_aso_element
+ *lasoe_one,
+ *lasoe_two;
+
+ struct libbenchmark_topology_node_state
+ *tns_one,
+ *tns_two;
+
+ LFDS710_PAL_ASSERT( lpset_one != NULL );
+ LFDS710_PAL_ASSERT( lpset_two != NULL );
+
+ /* TRD : this function is utterly annoying
+ it is word for word identical to one of the compare cases in the general topology node compare function
+ except the compare result for IS_A_SET is 0 rather than a call to the type comparer
+ and yet - !
+ I cannot factorize the code with the general topology node compare function
+ ahhhhhhh
+ this function is used only by the compare function in the results API
+ */
+
+ // TRD : first, count the number of shared logical processors
+ lasoe_one = LFDS710_LIST_ASO_GET_START( *lpset_one );
+ lasoe_two = LFDS710_LIST_ASO_GET_START( *lpset_two );
+
+ while( lasoe_one != NULL and lasoe_two != NULL )
+ {
+ tns_one = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_one );
+ tns_two = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe_two );
+
+ cr = 0;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number == tns_two->extended_node_info.logical_processor.windows_group_number )
+ {
+ if( tns_one->extended_node_info.logical_processor.number > tns_two->extended_node_info.logical_processor.number )
+ cr = 1;
+
+ if( tns_one->extended_node_info.logical_processor.number < tns_two->extended_node_info.logical_processor.number )
+ cr = -1;
+ }
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number < tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = -1;
+
+ if( tns_one->extended_node_info.logical_processor.windows_group_number > tns_two->extended_node_info.logical_processor.windows_group_number )
+ cr = 1;
+
+ if( cr == -1 )
+ {
+ lasoe_one = LFDS710_LIST_ASO_GET_NEXT( *lasoe_one );
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = -1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 1 )
+ {
+ lasoe_two = LFDS710_LIST_ASO_GET_NEXT( *lasoe_two );
+ if( not_a_set_flag == LOWERED )
+ {
+ not_a_set_compare = 1;
+ not_a_set_flag = RAISED;
+ }
+ }
+
+ if( cr == 0 )
+ {
+ shared_count++;
+ lasoe_one = LFDS710_LIST_ASO_GET_NEXT( *lasoe_one );
+ lasoe_two = LFDS710_LIST_ASO_GET_NEXT( *lasoe_two );
+ }
+ }
+
+ lfds710_list_aso_query( lpset_one, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &lps_one_count );
+ lfds710_list_aso_query( lpset_two, LFDS710_LIST_ASO_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &lps_two_count );
+
+ // TRD : same number of logical processors, and they're fully shared
+ if( lps_one_count == lps_two_count and shared_count == lps_one_count )
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SET;
+
+ // TRD : smaller number of logical processors, but they're all shared
+ if( lps_one_count < lps_two_count and shared_count == lps_one_count )
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUBSET;
+
+ // TRD : larger number of logical processors, but lps_two is fully represented in lps_one
+ if( lps_one_count > lps_two_count and shared_count == lps_two_count )
+ st = LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUPERSET;
+
+ switch( st )
+ {
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUBSET:
+ rv = -1;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SET:
+ rv = 0;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_SUPERSET:
+ rv = 1;
+ break;
+
+ case LIBBENCHMARK_TOPOLOGY_NODE_SET_TYPE_NOT_A_SET:
+ rv = not_a_set_compare;
+ break;
+ }
+
+ return rv;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libbenchmark_topology_node_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libbenchmark_topology_node_init( struct libbenchmark_topology_node_state *tns )
+{
+ LFDS710_PAL_ASSERT( tns != NULL );
+
+ // TRD : we only ever add logical processor nodes to the logical_processor_children list
+ lfds710_list_aso_init_valid_on_current_logical_core( &tns->logical_processor_children, libbenchmark_topology_node_compare_nodes_function, LFDS710_LIST_ASO_EXISTING_KEY_FAIL, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libbenchmark_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+INCDIR := ../../inc
+OBJDIR := ../../obj
+SRCDIR := ../../src
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+VERSION_NUMBER := 1
+MINOR_NUMBER := 0
+RELEASE_NUMBER := 0
+
+##### sources, objects and libraries #####
+BINNAME := libshared
+ARFILENAME := $(BINNAME).a
+ARPATHNAME := $(BINDIR)/$(ARFILENAME)
+SOBASENAME := $(BINNAME).so
+SONAME := $(SOBASENAME).$(VERSION_NUMBER)
+SOFILENAME := $(SONAME).$(MINOR_NUMBER).$(RELEASE_NUMBER)
+SOPATHNAME := $(BINDIR)/$(SOFILENAME)
+INCNAME := $(INCDIR)/$(BINNAME).h
+SRCDIRS := libshared_ansi libshared_memory libshared_misc libshared_porting_abstraction_layer
+SOURCES := libshared_ansi_strcat.c libshared_ansi_strcat_char.c libshared_ansi_strcat_number.c libshared_ansi_strcpy.c libshared_ansi_strlen.c \
+ libshared_memory_add.c libshared_memory_alloc.c libshared_memory_cleanup.c libshared_memory_init.c libshared_memory_query.c libshared_memory_rollback.c \
+ libshared_misc_query.c \
+ libshared_porting_abstraction_layer_thread_start.c libshared_porting_abstraction_layer_thread_wait.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+SYSLIBS :=
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL := -std=gnu89
+
+CC := gcc
+CFLAGS_MANDATORY := -c
+CFLAGS_OPTIONAL := -ffreestanding -std=gnu89 -Wall -Werror -Wno-unknown-pragmas
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+AR := ar
+ARFLAGS :=
+ARFLAGS_MANDATORY := rcs
+ARFLAGS_OPTIONAL :=
+
+LD := gcc
+LDFLAGS_MANDATORY := -shared -Wl,-soname,$(SONAME) -o $(SOPATHNAME)
+LDFLAGS_OPTIONAL := -nodefaultlibs -nostdlib -std=gnu89 -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -fPIC
+
+##### build variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CFLAGS_MANDATORY += -fPIC
+endif
+
+# TRD : default to debug
+ifeq ($(MAKECMDGOALS),)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(ARPATHNAME) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(ARPATHNAME) $(OBJECTS)
+
+$(SOPATHNAME) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) -o $(SOPATHNAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SONAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SOBASENAME)
+
+##### phony #####
+.PHONY : clean ar_cov ar_dbg ar_prof ar_rel ar_tsan ar_vanilla so_dbg so_prof so_rel so_tsan so_vanilla
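+
+# TRD : a minimal usage sketch, assuming GNUmake and gcc are on the path :
+#
+#         make ar_rel                   (static library, release)
+#         make clean
+#         make so_dbg                   (shared library, debug)
+#
+#       ar_install/so_install and the uninstall targets assume INSLIBDIR and
+#       INSINCDIR are set (they are not defined in this makefile), e.g. :
+#
+#         make ar_install INSLIBDIR=/usr/local/lib INSINCDIR=/usr/local/include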
+
+clean :
+ @rm -f $(BINDIR)/* $(OBJDIR)/*
+
+ar_cov : $(ARPATHNAME) # archive (.a), coverage
+ar_dbg : $(ARPATHNAME) # archive (.a), debug
+ar_prof : $(ARPATHNAME) # archive (.a), profiling
+ar_rel : $(ARPATHNAME) # archive (.a), release
+ar_tsan : $(ARPATHNAME) # archive (.a), thread sanitizer
+ar_vanilla : $(ARPATHNAME) # archive (.a), no specific-build arguments
+ar_install :
+ # TRD : leading backslash to use command rather than alias
+ # as many Linux distros have a built-in alias to force
+ # a prompt ("y/n?") on file overwrite - silent and
+ # unexpected interference which breaks a makefile
+ @mkdir -p $(INSLIBDIR)
+ @\cp $(ARPATHNAME) $(INSLIBDIR)
+ @mkdir -p $(INSINCDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+ar_uninstall :
+ @rm $(INSLIBDIR)/$(ARFILENAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+# TRD : so_cov currently disabled as it cannot work with -nostdlib -nodefaultlibs
+# so_cov : $(SOPATHNAME) # shared (.so), coverage
+so_dbg : $(SOPATHNAME) # shared (.so), debug
+so_prof : $(SOPATHNAME) # shared (.so), profiling
+so_rel : $(SOPATHNAME) # shared (.so), release
+so_tsan : $(SOPATHNAME) # shared (.so), thread sanitizer
+so_vanilla : $(SOPATHNAME) # shared (.so), no specific-build arguments
+so_install :
+ @mkdir -p $(INSINCDIR)
+ @\cp $(SOPATHNAME) $(INSLIBDIR)
+ @ldconfig -vn $(INSLIBDIR)
+ @ln -s $(SONAME) $(INSLIBDIR)/$(SOBASENAME)
+ @mkdir -p $(INSLIBDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+so_uninstall :
+ @rm -f $(INSLIBDIR)/$(SOFILENAME)
+ @rm -f $(INSLIBDIR)/$(SOBASENAME)
+ @rm -f $(INSLIBDIR)/$(SONAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+lib-y :=
+
+lib-y += ../../src/libshared_ansi/libshared_ansi_strcat.o
+lib-y += ../../src/libshared_ansi/libshared_ansi_strcat_char.o
+lib-y += ../../src/libshared_ansi/libshared_ansi_strcat_number.o
+lib-y += ../../src/libshared_ansi/libshared_ansi_strcpy.o
+lib-y += ../../src/libshared_ansi/libshared_ansi_strlen.o
+
+lib-y += ../../src/libshared_memory/libshared_memory_add.o
+lib-y += ../../src/libshared_memory/libshared_memory_alloc.o
+lib-y += ../../src/libshared_memory/libshared_memory_cleanup.o
+lib-y += ../../src/libshared_memory/libshared_memory_init.o
+lib-y += ../../src/libshared_memory/libshared_memory_query.o
+lib-y += ../../src/libshared_memory/libshared_memory_rollback.o
+
+lib-y += ../../src/libshared_misc/libshared_misc_query.o
+
+lib-y += ../../src/libshared_porting_abstraction_layer/libshared_porting_abstraction_layer_thread_start.o
+lib-y += ../../src/libshared_porting_abstraction_layer/libshared_porting_abstraction_layer_thread_wait.o
+
+libs-y := ../../bin/
+
+ccflags-y := -I$(src)/../../inc
+ccflags-y += -I$(src)/../../inc/libshared
+ccflags-y += -DKERNEL_MODE
+ccflags-y += -DNDEBUG
+ccflags-y += -ffreestanding
+ccflags-y += -std=gnu89
+ccflags-y += -Wall
+ccflags-y += -Werror
+ccflags-y += -Wno-unknown-pragmas
+
--- /dev/null
+default:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD)
+
+clean:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) clean
+ find ../../src/ -name "*.o" -type f -delete
+
+help:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) help
+
+modules:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) modules
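+
+# TRD : a minimal usage sketch - the targets simply hand off to the kbuild system
+#       of the currently running kernel, so the matching kernel headers/build tree
+#       must be installed under /lib/modules/`uname -r`/build :
+#
+#         make            (effectively the same as "make modules")
+#         make clean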
+
+
--- /dev/null
+Good filename, eh? :-)
+
+The build is broken because the porting abstraction layer is incomplete.
+
+The Linux kernel has no function which offers "wait on thread handle
+until thread has terminated". The user has to manage the sync himself.
+This breaks the libshared thread abstraction.
+
+
--- /dev/null
+EXPORTS
+
+libshared_ansi_strlen = libshared_ansi_strlen
+libshared_ansi_strcpy = libshared_ansi_strcpy
+libshared_ansi_strcat = libshared_ansi_strcat
+libshared_ansi_strcat_number = libshared_ansi_strcat_number
+libshared_ansi_strcat_number_with_leading_zeros = libshared_ansi_strcat_number_with_leading_zeros
+libshared_ansi_strcat_char = libshared_ansi_strcat_char
+
+libshared_memory_init = libshared_memory_init
+libshared_memory_cleanup = libshared_memory_cleanup
+libshared_memory_add_memory = libshared_memory_add_memory
+libshared_memory_add_memory_from_numa_node = libshared_memory_add_memory_from_numa_node
+libshared_memory_alloc_from_unknown_node = libshared_memory_alloc_from_unknown_node
+libshared_memory_alloc_from_specific_node = libshared_memory_alloc_from_specific_node
+libshared_memory_alloc_from_most_free_space_node = libshared_memory_alloc_from_most_free_space_node
+libshared_memory_alloc_largest_possible_array_from_unknown_node = libshared_memory_alloc_largest_possible_array_from_unknown_node
+libshared_memory_set_rollback = libshared_memory_set_rollback
+libshared_memory_rollback = libshared_memory_rollback
+libshared_memory_query = libshared_memory_query
+
+libshared_misc_query = libshared_misc_query
+
+libshared_pal_thread_start = libshared_pal_thread_start
+libshared_pal_thread_wait = libshared_pal_thread_wait
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+INCDIR := ..\..\inc
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := libshared
+LIB_BINARY := $(BINDIR)\$(BINNAME).lib
+DLL_BINARY := $(BINDIR)\$(BINNAME).dll
+SRCDIRS := libshared_ansi libshared_memory libshared_misc libshared_porting_abstraction_layer
+SOURCES := libshared_ansi_strcat.c libshared_ansi_strcat_char.c libshared_ansi_strcat_number.c libshared_ansi_strcpy.c libshared_ansi_strlen.c \
+ libshared_memory_add.c libshared_memory_alloc.c libshared_memory_cleanup.c libshared_memory_init.c libshared_memory_query.c libshared_memory_rollback.c \
+ libshared_misc_query.c \
+ libshared_porting_abstraction_layer_thread_start.c libshared_porting_abstraction_layer_thread_wait.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
+USRLIBS := ..\..\..\..\liblfds710\bin\liblfds710.lib
+
+##### default paths fix up #####
+INCDIRS := $(patsubst %,%;,$(INCDIR))
+INCLUDE += $(subst $(SPACE),,$(INCDIRS))
+
+##### tools #####
+CC := cl
+CFLAGS_MANDATORY := /c "/Fd$(BINDIR)\$(BINNAME).pdb" /wd4068
+CFLAGS_OPTIONAL := /DWIN32_LEAN_AND_MEAN /DUNICODE /D_UNICODE /nologo /W4 /WX
+CFLAGS_MANDATORY_DBG := /Od /Gm /Zi /D_DEBUG
+CFLAGS_MANDATORY_REL := /Ox /DNDEBUG
+
+AR := lib
+ARFLAGS :=
+ARFLAGS_MANDATORY := /subsystem:console
+ARFLAGS_OPTIONAL := /nologo /wx /verbose
+
+LD := link
+LDFLAGS_MANDATORY := /def:$(BINNAME).def /dll /nodefaultlib /subsystem:console
+LDFLAGS_OPTIONAL := /nologo /nxcompat /wx
+LDFLAGS_MANDATORY_DBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+LDFLAGS_MANDATORY_REL := /incremental:no
+
+##### variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug lib
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+  CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MT
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+  CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MDd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := msvcrtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MD
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := msvcrt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+dllrel : $(DLL_BINARY)
+dlldbg : $(DLL_BINARY)
+
+librel : $(LIB_BINARY)
+libdbg : $(LIB_BINARY)
+
+##### notes #####
+# /wd4068 : turn off "unknown pragma" warning
+
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "libshared_internal.h"
+
+
+
+
+
+/****************************************************************************/
+DRIVER_INITIALIZE DriverEntry;
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return STATUS_SUCCESS;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+EXPORTS
+
+libshared_ansi_strlen = libshared_ansi_strlen
+libshared_ansi_strcpy = libshared_ansi_strcpy
+libshared_ansi_strcat = libshared_ansi_strcat
+libshared_ansi_strcat_number = libshared_ansi_strcat_number
+libshared_ansi_strcat_number_with_leading_zeros = libshared_ansi_strcat_number_with_leading_zeros
+libshared_ansi_strcat_char = libshared_ansi_strcat_char
+
+libshared_memory_init = libshared_memory_init
+libshared_memory_cleanup = libshared_memory_cleanup
+libshared_memory_add_memory = libshared_memory_add_memory
+libshared_memory_add_memory_from_numa_node = libshared_memory_add_memory_from_numa_node
+libshared_memory_alloc_from_unknown_node = libshared_memory_alloc_from_unknown_node
+libshared_memory_alloc_from_specific_node = libshared_memory_alloc_from_specific_node
+libshared_memory_alloc_from_most_free_space_node = libshared_memory_alloc_from_most_free_space_node
+libshared_memory_alloc_largest_possible_array_from_unknown_node = libshared_memory_alloc_largest_possible_array_from_unknown_node
+libshared_memory_set_rollback = libshared_memory_set_rollback
+libshared_memory_rollback = libshared_memory_rollback
+libshared_memory_query = libshared_memory_query
+
+libshared_misc_query = libshared_misc_query
+
+libshared_pal_thread_start = libshared_pal_thread_start
+libshared_pal_thread_wait = libshared_pal_thread_wait
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!)
+
+To deal with these limitations, it is necessary for a Windows kernel
+build to run a batch file prior to building.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+They are both idempotent; you can run them as often as you like and
+switch between them as often as you want. It's all fine; whenever
+you run one of them, it will take you from whatever state you were
+previously in, into the state you want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file will then copy "/sources.static" into
+"/src/single_dir_for_windows_kernel/", which will cause a static
+library to be built.
+
+The dynamic library batch file will then copy "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic
+library to be built. It will also copy "src/driver_entry.c" into
+"/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even
+though it's not used.
+
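+For example, a typical static library build from a WDK build environment
+command prompt is just (the directory and batch file names here are
+illustrative - use the actual static library batch file in this directory):
+
+  cd build\wdk
+  runme_before_static_lib_build.bat
+  build
+
+Switching to a dynamic library build is then just a matter of running the
+other batch file and running "build" again.
+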
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\libshared_ansi\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libshared_memory\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libshared_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libshared_porting_abstraction_layer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\libshared_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y driver_entry_renamed_to_avoid_compiler_warning.c single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+copy /y sources.dynamic single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\libshared_ansi\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libshared_memory\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libshared_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libshared_porting_abstraction_layer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\libshared_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y sources.static single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+DLLDEF = ../libshared.def
+TARGETNAME = libshared
+TARGETPATH = ../../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = libshared_ansi_strcat.c \
+ libshared_ansi_strcat_char.c \
+ libshared_ansi_strcat_number.c \
+ libshared_ansi_strcpy.c \
+ libshared_ansi_strlen.c \
+ libshared_memory_add.c \
+ libshared_memory_alloc.c \
+ libshared_memory_cleanup.c \
+ libshared_memory_init.c \
+ libshared_memory_query.c \
+ libshared_memory_rollback.c \
+ libshared_misc_query.c \
+ libshared_porting_abstraction_layer_thread_start.c \
+ libshared_porting_abstraction_layer_thread_wait.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+TARGETNAME = libshared
+TARGETPATH = ../../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = libshared_ansi_strcat.c \
+ libshared_ansi_strcat_char.c \
+ libshared_ansi_strcat_number.c \
+ libshared_ansi_strcpy.c \
+ libshared_ansi_strlen.c \
+ libshared_memory_add.c \
+ libshared_memory_alloc.c \
+ libshared_memory_cleanup.c \
+ libshared_memory_init.c \
+ libshared_memory_query.c \
+ libshared_memory_rollback.c \
+ libshared_misc_query.c \
+ libshared_porting_abstraction_layer_thread_start.c \
+ libshared_porting_abstraction_layer_thread_wait.c
+
--- /dev/null
+#ifndef LIBSHARED_H
+
+ /***** defines *****/
+ #define LIBSHARED_H
+
+ /***** enums *****/
+ enum flag
+ {
+ LOWERED,
+ RAISED
+ };
+
+ /***** platform includes *****/
+ #include "libshared/libshared_porting_abstraction_layer_operating_system.h"
+
+  /***** external includes *****/
+ #include "../../../liblfds710/inc/liblfds710.h"
+
+ /***** includes *****/
+ #include "libshared/libshared_ansi.h"
+ #include "libshared/libshared_memory.h"
+ #include "libshared/libshared_misc.h"
+ #include "libshared/libshared_porting_abstraction_layer.h"
+
+#endif
+
--- /dev/null
+/***** defines *****/
+
+/***** enums *****/
+
+/***** structs *****/
+
+/***** public prototypes *****/
+lfds710_pal_uint_t libshared_ansi_strlen( char const * const string );
+void libshared_ansi_strcpy( char *destination, char const *source );
+void libshared_ansi_strcat( char *destination, char const * const source );
+void libshared_ansi_strcat_number( char *destination, lfds710_pal_uint_t number );
+void libshared_ansi_strcat_number_with_leading_zeros( char *destination, lfds710_pal_uint_t number, lfds710_pal_uint_t minimum_width );
+void libshared_ansi_strcat_char( char *destination, char const source );
+
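+/* TRD : usage sketch (illustrative only - the buffer and values are made up)
+
+         note the strcat variants require the destination to already contain
+         a terminated string (an empty string is fine) and to be large enough
+         for the result
+
+           char
+             message[256] = "";
+
+           libshared_ansi_strcpy( message, "count = " );
+           libshared_ansi_strcat_number( message, (lfds710_pal_uint_t) 42 );
+
+           // TRD : message now holds "count = 42"
+*/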
--- /dev/null
+/***** enums *****/
+enum libshared_memory_query
+{
+ LIBSHARED_MEMORY_QUERY_GET_AVAILABLE
+};
+
+/***** structs *****/
+struct libshared_memory_element
+{
+ char unsigned
+ *original,
+ *original_after_me_alloc,
+    *current_pointer, // TRD : "_pointer" suffix because the Linux kernel has lower-case defines, one of which is "current"
+ *rollback;
+
+ enum flag
+ known_numa_node_flag;
+
+ lfds710_pal_uint_t
+ current_memory_size_in_bytes,
+ numa_node_id,
+ original_memory_size_in_bytes,
+ original_after_me_alloc_memory_size_in_bytes,
+ rollback_memory_size_in_bytes;
+
+ struct lfds710_list_asu_element
+ lasue;
+};
+
+struct libshared_memory_state
+{
+ struct lfds710_list_asu_state
+ list_of_allocations;
+};
+
+/***** public prototypes *****/
+void libshared_memory_init( struct libshared_memory_state *ms );
+void libshared_memory_cleanup( struct libshared_memory_state *ms,
+ void (*memory_cleanup_callback)(enum flag known_numa_node_flag,
+ void *store,
+ lfds710_pal_uint_t size) );
+
+void libshared_memory_add_memory( struct libshared_memory_state *ms,
+ void *memory,
+ lfds710_pal_uint_t memory_size_in_bytes );
+void libshared_memory_add_memory_from_numa_node( struct libshared_memory_state *ms,
+ lfds710_pal_uint_t numa_node_id,
+ void *memory,
+ lfds710_pal_uint_t memory_size_in_bytes );
+
+void *libshared_memory_alloc_from_unknown_node( struct libshared_memory_state *ms, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes );
+void *libshared_memory_alloc_from_specific_node( struct libshared_memory_state *ms, lfds710_pal_uint_t numa_node_id, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes );
+void *libshared_memory_alloc_from_most_free_space_node( struct libshared_memory_state *ms, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes );
+void *libshared_memory_alloc_largest_possible_array_from_unknown_node( struct libshared_memory_state *ms, lfds710_pal_uint_t element_size_in_bytes, lfds710_pal_uint_t alignment_in_bytes, lfds710_pal_uint_t *number_elements );
+
+void libshared_memory_set_rollback( struct libshared_memory_state *ms );
+void libshared_memory_rollback( struct libshared_memory_state *ms );
+
+void libshared_memory_query( struct libshared_memory_state *ms,
+ enum libshared_memory_query query_type,
+ void *query_input,
+ void *query_output );
+
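+/* TRD : usage sketch (illustrative only - the store and sizes are made up)
+
+         the caller supplies the backing store; libshared_memory then
+         sub-allocates from it, and cleanup hands each store back via the
+         callback, if one is given
+
+           static char unsigned
+             store[1024 * 1024];
+
+           struct libshared_memory_state
+             ms;
+
+           void
+             *allocation;
+
+           libshared_memory_init( &ms );
+           libshared_memory_add_memory( &ms, store, sizeof(store) );
+
+           allocation = libshared_memory_alloc_from_unknown_node( &ms, 4096, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+           libshared_memory_cleanup( &ms, NULL );
+*/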
--- /dev/null
+/***** defines *****/
+#define LIBSHARED_MISC_VERSION_STRING "7.1.0"
+#define LIBSHARED_MISC_VERSION_INTEGER 710
+
+/***** enums *****/
+enum libshared_misc_query
+{
+ LIBSHARED_MISC_QUERY_GET_BUILD_AND_VERSION_STRING
+};
+
+/***** externs *****/
+
+/***** public prototypes *****/
+void libshared_misc_query( enum libshared_misc_query query_type, void *query_input, void *query_output );
+
--- /dev/null
+/***** defines *****/
+#define LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER( libshared_pal_thread_info ) (libshared_pal_thread_info).logical_processor_number
+#define LIBSHARED_PAL_PTI_GET_WINDOWS_PROCESSOR_GROUP_NUMBER( libshared_pal_thread_info ) (libshared_pal_thread_info).windows_processor_group_number
+#define LIBSHARED_PAL_PTI_GET_NUMA_NODE_ID( libshared_pal_thread_info ) (libshared_pal_thread_info).numa_node_id
+#define LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION( libshared_pal_thread_info ) (libshared_pal_thread_info).thread_function
+#define LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT( libshared_pal_thread_info ) (libshared_pal_thread_info).thread_argument
+
+/***** structs *****/
+struct libshared_pal_thread_info
+{
+ // TRD : this struct must be user-allocated and last till the thread ends - needed for thread pinning on android
+
+ lfds710_pal_uint_t
+ logical_processor_number,
+ numa_node_id,
+ windows_processor_group_number;
+
+ libshared_pal_thread_return_t
+ (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_argument );
+
+ void
+ *thread_argument;
+};
+
+/***** public prototypes *****/
+int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti );
+
+void libshared_pal_thread_wait( libshared_pal_thread_handle_t thread_handle );
+
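+/* TRD : usage sketch (illustrative only - my_thread_function is a user-supplied function)
+
+         the pti struct must be allocated by the caller and must remain valid
+         until the thread has finished
+
+           libshared_pal_thread_handle_t
+             thread_handle;
+
+           struct libshared_pal_thread_info
+             pti;
+
+           pti.logical_processor_number = 0;
+           pti.windows_processor_group_number = 0;
+           pti.numa_node_id = 0;
+           pti.thread_function = my_thread_function;
+           pti.thread_argument = NULL;
+
+           libshared_pal_thread_start( &thread_handle, &pti );
+           libshared_pal_thread_wait( thread_handle );
+
+         where my_thread_function is declared as:
+
+           libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION my_thread_function( void *thread_argument );
+*/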
--- /dev/null
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBSHARED_PAL_OPERATING_SYSTEM
+
+ #define LIBSHARED_PAL_OS_STRING "Windows"
+
+ #include <windows.h>
+
+ typedef HANDLE libshared_pal_thread_handle_t;
+ typedef DWORD libshared_pal_thread_return_t;
+
+ #define LIBSHARED_PAL_THREAD_CALLING_CONVENTION WINAPI
+ #define LIBSHARED_PAL_THREAD_RETURN_TYPE libshared_pal_thread_return_t
+ #define LIBSHARED_PAL_THREAD_RETURN_CAST( return_value ) return_value
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBSHARED_PAL_OPERATING_SYSTEM
+
+ #define LIBSHARED_PAL_OS_STRING "Windows"
+
+ #include <wdm.h>
+
+ typedef HANDLE libshared_pal_thread_handle_t;
+ typedef VOID libshared_pal_thread_return_t;
+
+ #define LIBSHARED_PAL_THREAD_CALLING_CONVENTION
+ #define LIBSHARED_PAL_THREAD_RETURN_TYPE int
+ #define LIBSHARED_PAL_THREAD_RETURN_CAST( return_value )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBSHARED_PAL_OPERATING_SYSTEM
+
+ #define _GNU_SOURCE
+ #include <unistd.h>
+
+ #define LIBSHARED_PAL_OS_STRING "Linux"
+
+ #if( _POSIX_THREADS >= 0 )
+ #include <pthread.h>
+ #include <sched.h>
+ #include <sys/syscall.h>
+ #include <sys/types.h>
+
+ typedef pthread_t libshared_pal_thread_handle_t;
+ typedef void * libshared_pal_thread_return_t;
+
+ #define LIBSHARED_PAL_THREAD_CALLING_CONVENTION
+ #define LIBSHARED_PAL_THREAD_RETURN_TYPE libshared_pal_thread_return_t
+ #define LIBSHARED_PAL_THREAD_RETURN_CAST( return_value ) ( (libshared_pal_thread_return_t) (return_value) )
+ #endif
+
+ #if( _POSIX_THREADS == -1 )
+ #error No pthread support under Linux in libshared_porting_abstraction_layer_operating_system.h
+ #endif
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_OPERATING_SYSTEM
+    #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBSHARED_PAL_OPERATING_SYSTEM
+
+ #define LIBSHARED_PAL_OS_STRING "Linux"
+
+ #define _GNU_SOURCE
+
+ #include <linux/module.h>
+ #include <linux/kthread.h>
+
+ typedef struct task_struct * libshared_pal_thread_handle_t;
+ typedef int libshared_pal_thread_return_t;
+
+ #define LIBSHARED_PAL_THREAD_CALLING_CONVENTION
+ #define LIBSHARED_PAL_THREAD_RETURN_TYPE libshared_pal_thread_return_t
+ #define LIBSHARED_PAL_THREAD_RETURN_CAST( return_value ) ( (libshared_pal_thread_return_t) (return_value) )
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBSHARED_PAL_OPERATING_SYSTEM )
+
+ #error No matching porting abstraction layer in "libshared_porting_abstraction_layer_operating_system.h".
+
+#endif
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libshared_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libshared_ansi_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libshared_ansi_strcat( char *destination, char const * const source )
+{
+ LFDS710_PAL_ASSERT( destination != NULL );
+ LFDS710_PAL_ASSERT( source != NULL );
+
+ while( *destination++ != '\0' );
+
+ libshared_ansi_strcpy( destination-1, source );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libshared_ansi_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libshared_ansi_strcat_char( char *destination, char const source )
+{
+ LFDS710_PAL_ASSERT( destination != NULL );
+ // TRD : source can be any value in its range
+
+ while( *destination++ != '\0' );
+
+ *(destination-1) = source;
+ *destination = '\0';
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libshared_ansi_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libshared_ansi_strcat_number( char *destination, lfds710_pal_uint_t number )
+{
+ lfds710_pal_uint_t
+ digit,
+ length = 0,
+ original_number;
+
+ LFDS710_PAL_ASSERT( destination != NULL );
+ // TRD : number can be any value in its range
+
+ // TRD : point destination at the end of the string
+ while( *destination++ != '\0' );
+
+ destination--;
+
+ // TRD : figure out length of the number
+
+ original_number = number;
+
+ do
+ {
+ digit = number % 10;
+ length++;
+ number -= digit;
+ number /= 10;
+ }
+ while( number > 0 );
+
+ destination[length] = '\0';
+
+ // TRD : copy over the number digits - note we get them the right way around
+
+ number = original_number;
+
+ do
+ {
+ digit = number % 10;
+ destination[--length] = (char) ( digit + '0' );
+ number -= digit;
+ number /= 10;
+ }
+ while( number > 0 );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libshared_ansi_strcat_number_with_leading_zeros( char *destination, lfds710_pal_uint_t number, lfds710_pal_uint_t minimum_width )
+{
+ lfds710_pal_uint_t
+ digit,
+ length = 0,
+ loop,
+ original_number;
+
+ LFDS710_PAL_ASSERT( destination != NULL );
+ // TRD : number can be any value in its range
+ // TRD : minimum_width can be any value in its range
+
+ // TRD : point destination at the end of the string
+ while( *destination++ != '\0' );
+
+ destination--;
+
+ // TRD : figure out length of the number
+
+ original_number = number;
+
+ do
+ {
+ digit = number % 10;
+ length++;
+ number -= digit;
+ number /= 10;
+ }
+ while( number > 0 );
+
+ if( length < minimum_width )
+ for( loop = 0 ; loop < minimum_width - length ; loop++ )
+ *destination++ = '0';
+
+ destination[length] = '\0';
+
+ // TRD : copy over the number digits - note we get them the right way around
+
+ number = original_number;
+
+ do
+ {
+ digit = number % 10;
+ destination[--length] = (char) ( digit + '0' );
+ number -= digit;
+ number /= 10;
+ }
+ while( number > 0 );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libshared_ansi_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4706 )
+
+void libshared_ansi_strcpy( char *destination, char const *source )
+{
+ LFDS710_PAL_ASSERT( destination != NULL );
+ LFDS710_PAL_ASSERT( source != NULL );
+
+ while( (*destination++ = *source++) );
+
+ return;
+}
+
+#pragma warning( default : 4706 )
+
--- /dev/null
+/***** includes *****/
+#include "libshared_ansi_internal.h"
+
+
+
+
+
+/****************************************************************************/
+lfds710_pal_uint_t libshared_ansi_strlen( char const * const string )
+{
+ char const
+ *temp;
+
+ LFDS710_PAL_ASSERT( string != NULL );
+
+ temp = (char const *) string;
+
+ while( *temp++ != '\0' );
+
+ return (lfds710_pal_uint_t) (temp-1 - string);
+}
+
--- /dev/null
+/***** public prototypes *****/
+#include "../inc/libshared.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define NO_FLAGS 0x0
+
+#define RETURN_SUCCESS 0
+#define RETURN_FAILURE 1
+
+#define LIBSHARED_VERSION_STRING "7.1.0"
+#define LIBSHARED_VERSION_INTEGER 710
+
+#if( defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && defined PROF )
+ #define BUILD_TYPE_STRING "profiling"
+#endif
+
+/***** library-wide prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libshared_memory_internal.h"
+
+/***** private prototypes *****/
+static void alloc_and_init_memory_element( struct libshared_memory_element **me, void *memory, lfds710_pal_uint_t memory_size_in_bytes );
+
+
+
+
+/****************************************************************************/
+void libshared_memory_add_memory_from_numa_node( struct libshared_memory_state *ms, lfds710_pal_uint_t numa_node_id, void *memory, lfds710_pal_uint_t memory_size_in_bytes )
+{
+ struct libshared_memory_element
+ *me;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_node_id can be any value in its range
+ LFDS710_PAL_ASSERT( memory != NULL );
+ // TRD : memory_size_in_bytes can be any value in its range
+
+ alloc_and_init_memory_element( &me, memory, memory_size_in_bytes );
+
+ me->known_numa_node_flag = RAISED;
+ me->numa_node_id = numa_node_id;
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( me->lasue, me );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( me->lasue, me );
+ lfds710_list_asu_insert_at_start( &ms->list_of_allocations, &me->lasue );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libshared_memory_add_memory( struct libshared_memory_state *ms, void *memory, lfds710_pal_uint_t memory_size_in_bytes )
+{
+ struct libshared_memory_element
+ *me;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( memory != NULL );
+ // TRD : memory_size_in_bytes can be any value in its range
+
+ alloc_and_init_memory_element( &me, memory, memory_size_in_bytes );
+
+ me->known_numa_node_flag = LOWERED;
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( me->lasue, me );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( me->lasue, me );
+ lfds710_list_asu_insert_at_start( &ms->list_of_allocations, &me->lasue );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static void alloc_and_init_memory_element( struct libshared_memory_element **me, void *memory, lfds710_pal_uint_t memory_size_in_bytes )
+{
+ lfds710_pal_uint_t
+ alignment_bump,
+ size_in_bytes,
+ total_size_in_bytes;
+
+ LFDS710_PAL_ASSERT( me != NULL );
+ LFDS710_PAL_ASSERT( memory != NULL );
+ // TRD : memory_size_in_bytes can be any value in its range
+
+ alignment_bump = (lfds710_pal_uint_t) memory % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES;
+
+ if( alignment_bump != 0 )
+ alignment_bump = LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES - alignment_bump;
+
+ size_in_bytes = sizeof( struct libshared_memory_element );
+
+ total_size_in_bytes = size_in_bytes + alignment_bump;
+
+ *me = (struct libshared_memory_element *) ( (char unsigned *) memory + alignment_bump );
+
+ (*me)->original = memory;
+ (*me)->original_memory_size_in_bytes = memory_size_in_bytes;
+
+ (*me)->original_after_me_alloc = (*me)->current_pointer = (char unsigned *) memory + total_size_in_bytes;
+ (*me)->original_after_me_alloc_memory_size_in_bytes = (*me)->current_memory_size_in_bytes = memory_size_in_bytes - total_size_in_bytes;
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libshared_memory_internal.h"
+
+/***** private prototypes *****/
+static void *alloc_from_memory_element( struct libshared_memory_element *me, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes );
+
+
+
+
+
+/****************************************************************************/
+void *libshared_memory_alloc_from_unknown_node( struct libshared_memory_state *ms, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me;
+
+ void
+ *allocation = NULL;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : size_in_bytes can be any value in its range
+ // TRD : alignment_in_bytes can be any value in its range
+
+ while( allocation == NULL and LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( me->known_numa_node_flag == LOWERED )
+ allocation = alloc_from_memory_element( me, size_in_bytes, alignment_in_bytes );
+ }
+
+ return allocation;
+}
+
+
+
+
+
+/****************************************************************************/
+void *libshared_memory_alloc_largest_possible_array_from_unknown_node( struct libshared_memory_state *ms, lfds710_pal_uint_t element_size_in_bytes, lfds710_pal_uint_t alignment_in_bytes, lfds710_pal_uint_t *number_elements )
+{
+ lfds710_pal_uint_t
+ alignment_bump;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me = NULL,
+ *temp_me;
+
+ void
+ *allocation;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : element_size_in_bytes can be any value in its range
+ // TRD : alignment_in_bytes can be any value in its range
+ LFDS710_PAL_ASSERT( number_elements != NULL );
+
+ /* TRD : find the largest unknown-node memory element
+ then alloc from that
+ */
+
+ // TRD : find the correct memory element - in this case, the one with most free space
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ temp_me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( temp_me->known_numa_node_flag == LOWERED )
+ if( me == NULL or temp_me->current_memory_size_in_bytes > me->current_memory_size_in_bytes )
+ me = temp_me;
+ }
+
+ alignment_bump = (lfds710_pal_uint_t) me->current_pointer % alignment_in_bytes;
+
+ if( alignment_bump != 0 )
+ alignment_bump = alignment_in_bytes - alignment_bump;
+
+ *number_elements = (me->current_memory_size_in_bytes - alignment_bump) / element_size_in_bytes;
+
+ allocation = alloc_from_memory_element( me, *number_elements * element_size_in_bytes, alignment_in_bytes );
+
+ return allocation;
+}
+
+
+
+
+
+/****************************************************************************/
+void *libshared_memory_alloc_from_specific_node( struct libshared_memory_state *ms, lfds710_pal_uint_t numa_node_id, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me;
+
+ void
+ *allocation = NULL;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : numa_node_id can be any value in its range
+ // TRD : size_in_bytes can be any value in its range
+ // TRD : alignment_in_bytes can be any value in its range
+
+ while( allocation == NULL and LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( me->known_numa_node_flag == RAISED and me->numa_node_id == numa_node_id )
+ allocation = alloc_from_memory_element( me, size_in_bytes, alignment_in_bytes );
+ }
+
+ return allocation;
+}
+
+
+
+
+
+/****************************************************************************/
+void *libshared_memory_alloc_from_most_free_space_node( struct libshared_memory_state *ms, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me = NULL,
+ *temp_me;
+
+ void
+ *allocation;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : size_in_bytes can be any value in its range
+ // TRD : alignment_in_bytes can be any value in its range
+
+ // TRD : find the correct memory element - in this case, the one with most free space
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ temp_me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( me == NULL or temp_me->current_memory_size_in_bytes > me->current_memory_size_in_bytes )
+ me = temp_me;
+ }
+
+ allocation = alloc_from_memory_element( me, size_in_bytes, alignment_in_bytes );
+
+ return allocation;
+}
+
+
+
+
+
+/****************************************************************************/
+static void *alloc_from_memory_element( struct libshared_memory_element *me, lfds710_pal_uint_t size_in_bytes, lfds710_pal_uint_t alignment_in_bytes )
+{
+ lfds710_pal_uint_t
+ alignment_bump,
+ total_size_in_bytes;
+
+ void
+ *allocation;
+
+ LFDS710_PAL_ASSERT( me != NULL );
+ // TRD : size_in_bytes can be any value in its range
+ // TRD : alignment_in_bytes can be any value in its range
+
+ alignment_bump = (lfds710_pal_uint_t) me->current_pointer % alignment_in_bytes;
+
+ if( alignment_bump != 0 )
+ alignment_bump = alignment_in_bytes - alignment_bump;
+
+ total_size_in_bytes = size_in_bytes + alignment_bump;
+
+ if( total_size_in_bytes > me->current_memory_size_in_bytes )
+ return NULL;
+
+ me->current_pointer += alignment_bump;
+
+ allocation = me->current_pointer;
+
+ me->current_pointer += size_in_bytes;
+
+ me->current_memory_size_in_bytes -= total_size_in_bytes;
+
+ return allocation;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libshared_memory_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libshared_memory_cleanup( struct libshared_memory_state *ms, void (*memory_cleanup_callback)(enum flag known_numa_node_flag, void *store, lfds710_pal_uint_t size) )
+{
+ struct lfds710_list_asu_element
+ *lasue,
+ *lasue_next;
+
+ struct libshared_memory_element
+ *me;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : memory_cleanup_callback can be NULL
+
+ if( memory_cleanup_callback != NULL )
+ {
+ // TRD : remember that the allocation we're free()ing may contain the list element itself
+
+ lasue = LFDS710_LIST_ASU_GET_START( ms->list_of_allocations );
+
+ while( lasue != NULL )
+ {
+ lasue_next = LFDS710_LIST_ASU_GET_NEXT( *lasue );
+ me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ memory_cleanup_callback( me->known_numa_node_flag, me->original, me->original_memory_size_in_bytes );
+ lasue = lasue_next;
+ }
+ }
+
+ lfds710_list_asu_cleanup( &ms->list_of_allocations, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libshared_memory_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libshared_memory_init( struct libshared_memory_state *ms )
+{
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &ms->list_of_allocations, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libshared_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libshared_memory_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libshared_memory_query( struct libshared_memory_state *ms, enum libshared_memory_query query_type, void *query_input, void *query_output )
+{
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : query_type can be any value in its range
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ switch( query_type )
+ {
+ case LIBSHARED_MEMORY_QUERY_GET_AVAILABLE:
+ {
+ lfds710_pal_uint_t
+ available_bytes = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me;
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ available_bytes += me->current_memory_size_in_bytes;
+ }
+
+ *(lfds710_pal_uint_t *) query_output = available_bytes;
+ }
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libshared_memory_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libshared_memory_set_rollback( struct libshared_memory_state *ms )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ me->rollback = me->current_pointer;
+ me->rollback_memory_size_in_bytes = me->current_memory_size_in_bytes;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libshared_memory_rollback( struct libshared_memory_state *ms )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libshared_memory_element
+ *me;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ms->list_of_allocations,lasue) )
+ {
+ me = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ me->current_pointer = me->rollback;
+ me->current_memory_size_in_bytes = me->rollback_memory_size_in_bytes;
+ }
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libshared_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libshared_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libshared_misc_query( enum libshared_misc_query query_type, void *query_input, void *query_output )
+{
+ // TRD : query type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+  // TRD : query_output can be NULL in some cases
+
+ switch( query_type )
+ {
+ case LIBSHARED_MISC_QUERY_GET_BUILD_AND_VERSION_STRING:
+ {
+ char static const
+ * const build_and_version_string = "libshared " LIBSHARED_MISC_VERSION_STRING " (" BUILD_TYPE_STRING ", " LIBSHARED_PAL_OS_STRING ", " MODE_TYPE_STRING ")";
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(char const **) query_output = build_and_version_string;
+ }
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libshared_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libshared_porting_abstraction_layer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WIN7 && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+ BOOL
+ brv;
+
+ DWORD
+ thread_id;
+
+ GROUP_AFFINITY
+ ga;
+
+ int
+ rv = 0;
+
+ LPPROC_THREAD_ATTRIBUTE_LIST
+ attribute_list;
+
+ SIZE_T
+ attribute_list_length;
+
+ LFDS710_PAL_ASSERT( thread_handle != NULL );
+ LFDS710_PAL_ASSERT( pti != NULL );
+
+ /* TRD : here we're using CreateRemoteThreadEx() to start a thread in our own process
+ we do this because as a function, it allows us to specify processor and processor group affinity in the create call
+ */
+
+ brv = InitializeProcThreadAttributeList( NULL, 1, 0, &attribute_list_length );
+ attribute_list = VirtualAlloc( NULL, attribute_list_length, MEM_COMMIT, PAGE_READWRITE );
+ brv = InitializeProcThreadAttributeList( attribute_list, 1, 0, &attribute_list_length );
+
+ ga.Mask = ( (KAFFINITY) 1 << LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti) );
+ ga.Group = (WORD) LIBSHARED_PAL_PTI_GET_WINDOWS_PROCESSOR_GROUP_NUMBER(*pti);
+ ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;
+
+ brv = UpdateProcThreadAttribute( attribute_list, 0, PROC_THREAD_ATTRIBUTE_GROUP_AFFINITY, &ga, sizeof(GROUP_AFFINITY), NULL, NULL );
+ *thread_handle = CreateRemoteThreadEx( GetCurrentProcess(), NULL, 0, LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti), LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti), NO_FLAGS, attribute_list, &thread_id );
+
+ DeleteProcThreadAttributeList( attribute_list );
+ VirtualFree( attribute_list, 0, MEM_RELEASE );
+
+ if( *thread_handle != NULL )
+ rv = 1;
+
+ return rv;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+ int
+ rv = 0;
+
+ DWORD
+ thread_id;
+
+ DWORD_PTR
+ affinity_mask,
+ result;
+
+ LFDS710_PAL_ASSERT( thread_handle != NULL );
+ LFDS710_PAL_ASSERT( pti != NULL );
+
+ /* TRD : Vista and earlier do not support processor groups
+ as such, there is a single implicit processor group
+ also, there's no support for actually starting a thread in its correct NUMA node / logical processor
+ so we make the best of it; we start suspended, set the affinity, and then resume
+ the thread itself internally is expected to be making allocs from the correct NUMA node
+ */
+
+ *thread_handle = CreateThread( NULL, 0, LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti), LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti), CREATE_SUSPENDED, &thread_id );
+
+ if( *thread_handle != NULL )
+ {
+ affinity_mask = (DWORD_PTR) (1 << LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti));
+ SetThreadAffinityMask( *thread_handle, affinity_mask );
+ ResumeThread( *thread_handle );
+ rv = 1;
+ }
+
+ return rv;
+ }
+
+#endif
+
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 && defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ /***** prototypes *****/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state );
+
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+ int
+ rv = 0;
+
+ NTSTATUS
+ nts_create;
+
+ OBJECT_ATTRIBUTES
+ oa;
+
+    LFDS710_PAL_ASSERT( thread_handle != NULL );
+    LFDS710_PAL_ASSERT( pti != NULL );
+
+ InitializeObjectAttributes( &oa, NULL, OBJ_KERNEL_HANDLE, NULL, NULL );
+
+ nts_create = PsCreateSystemThread( thread_handle, THREAD_ALL_ACCESS, &oa, NtCurrentProcess(), NULL, test_pal_internal_thread_function, pti );
+
+ if( nts_create == STATUS_SUCCESS )
+ rv = 1;
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state )
+ {
+ KAFFINITY
+ affinity;
+
+ struct libshared_pal_thread_info
+ *pti;
+
+ LFDS710_PAL_ASSERT( thread_user_state != NULL );
+
+ pti = (struct libshared_pal_thread_info *) thread_user_state;
+
+    affinity = (KAFFINITY) 1 << LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti);
+
+ KeSetSystemAffinityThread( affinity );
+
+ LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti)( LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti) );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WIN7 && defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ /***** prototypes *****/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state );
+
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+ int
+ rv = 0;
+
+ NTSTATUS
+ nts_create;
+
+ OBJECT_ATTRIBUTES
+ oa;
+
+    LFDS710_PAL_ASSERT( thread_handle != NULL );
+    LFDS710_PAL_ASSERT( pti != NULL );
+
+ InitializeObjectAttributes( &oa, NULL, OBJ_KERNEL_HANDLE, NULL, NULL );
+
+ nts_create = PsCreateSystemThread( thread_handle, THREAD_ALL_ACCESS, &oa, NtCurrentProcess(), NULL, test_pal_internal_thread_function, pti );
+
+ if( nts_create == STATUS_SUCCESS )
+ rv = 1;
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state )
+ {
+ GROUP_AFFINITY
+ group_affinity,
+ previous_group_affinity;
+
+ struct libshared_pal_thread_info
+ *pti;
+
+ LFDS710_PAL_ASSERT( thread_user_state != NULL );
+
+ pti = (struct libshared_pal_thread_info *) thread_user_state;
+
+    group_affinity.Mask = ( (KAFFINITY) 1 ) << LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti);
+    // TRD : Group is a WORD in the user-mode MS docs, but a USHORT in WDK7.1 headers
+    group_affinity.Group = (USHORT) LIBSHARED_PAL_PTI_GET_WINDOWS_PROCESSOR_GROUP_NUMBER(*pti);
+    group_affinity.Reserved[0] = group_affinity.Reserved[1] = group_affinity.Reserved[2] = 0;
+
+    // TRD : only set the affinity once the GROUP_AFFINITY struct is fully initialized
+    KeSetSystemGroupAffinityThread( &group_affinity, &previous_group_affinity );
+
+ LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti)( LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti) );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined _POSIX_THREADS && _POSIX_THREADS > 0 && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ /***** prototypes *****/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state );
+
+ /****************************************************************************/
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+    int
+      rv = 0,
+      rv_create;
+
+ /* TRD : this implementation exists because the pthreads function for setting thread affinity,
+ pthread_attr_setaffinity_np(), works on Linux, but not Android
+ */
+
+ LFDS710_PAL_ASSERT( thread_handle != NULL );
+ LFDS710_PAL_ASSERT( pti != NULL );
+
+    rv_create = pthread_create( thread_handle, NULL, test_pal_internal_thread_function, pti );
+
+    if( rv_create == 0 )
+      rv = 1;
+
+ return rv;
+ }
+
+ /****************************************************************************/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION test_pal_internal_thread_function( void *thread_user_state )
+ {
+ cpu_set_t
+ cpuset;
+
+ pid_t
+ tid;
+
+ struct libshared_pal_thread_info
+ *pti;
+
+ LIBSHARED_PAL_THREAD_RETURN_TYPE
+ rv;
+
+ LFDS710_PAL_ASSERT( thread_user_state != NULL );
+
+ /* TRD : the APIs under Linux/POSIX for setting thread affinity are in a mess
+
+ pthreads offers pthread_attr_setaffinity_np(), which glibc supports, but which is not supported by Android
+
+ Linux offers sched_setaffinity(), but this needs a *thread pid*,
+ and the only API to get a thread pid is gettid(), which works for
+ and only for *the calling thread*
+
+ so we come to this - a wrapper thread function, which is the function used
+ when starting a thread; this calls gettid() and then sched_setaffinity(),
+ and then calls into the actual thread function
+
+ generally shaking my head in disbelief at this point
+ */
+
+ pti = (struct libshared_pal_thread_info *) thread_user_state;
+
+ CPU_ZERO( &cpuset );
+ CPU_SET( LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti), &cpuset );
+
+ tid = syscall( SYS_gettid );
+
+ sched_setaffinity( tid, sizeof(cpu_set_t), &cpuset );
+
+ rv = LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti)( LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti) );
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(rv);
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined __linux__ && defined _POSIX_THREADS && _POSIX_THREADS > 0 && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+ int
+ rv = 0,
+ rv_create;
+
+ cpu_set_t
+ cpuset;
+
+ pthread_attr_t
+ attr;
+
+ LFDS710_PAL_ASSERT( thread_handle != NULL );
+ LFDS710_PAL_ASSERT( pti != NULL );
+
+ pthread_attr_init( &attr );
+
+ CPU_ZERO( &cpuset );
+ CPU_SET( LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti), &cpuset );
+ pthread_attr_setaffinity_np( &attr, sizeof(cpuset), &cpuset );
+
+ rv_create = pthread_create( thread_handle, &attr, LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti), LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti) );
+
+ if( rv_create == 0 )
+ rv = 1;
+
+ pthread_attr_destroy( &attr );
+
+ return rv;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_START
+ #error More than one porting abstraction layer matches the current platform in "libshared_porting_abstraction_layer_thread_start.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_START
+
+ int libshared_pal_thread_start( libshared_pal_thread_handle_t *thread_handle,
+ struct libshared_pal_thread_info *pti )
+ {
+ LFDS710_PAL_ASSERT( thread_handle != NULL );
+ LFDS710_PAL_ASSERT( pti != NULL );
+
+ *thread_handle = kthread_create_on_node( LIBSHARED_PAL_PTI_GET_THREAD_FUNCTION(*pti), LIBSHARED_PAL_PTI_GET_THREAD_ARGUMENT(*pti), (int) LIBSHARED_PAL_PTI_GET_NUMA_NODE_ID(*pti), "lfds" );
+
+ kthread_bind( *thread_handle, LIBSHARED_PAL_PTI_GET_LOGICAL_PROCESSOR_NUMBER(*pti) );
+
+ wake_up_process( *thread_handle );
+
+ return 1;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBSHARED_PAL_THREAD_START )
+
+ #error No matching porting abstraction layer in "libshared_porting_abstraction_layer_thread_start.c".
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "libshared_porting_abstraction_layer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP )
+
+ #ifdef LIBSHARED_PAL_THREAD_WAIT
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_thread_wait.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_WAIT
+
+ void libshared_pal_thread_wait( libshared_pal_thread_handle_t thread_handle )
+ {
+ // TRD : thread_handle can be any value in its range
+
+ WaitForSingleObject( thread_handle, INFINITE );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP )
+
+ #ifdef LIBSHARED_PAL_THREAD_WAIT
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_thread_wait.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_WAIT
+
+ void libshared_pal_thread_wait( libshared_pal_thread_handle_t thread_handle )
+ {
+ // TRD : thread_handle can be any value in its range
+
+ KeWaitForSingleObject( thread_handle, Executive, KernelMode, FALSE, NULL );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _POSIX_THREADS && _POSIX_THREADS > 0 && !defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_WAIT
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_thread_wait.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_WAIT
+
+ void libshared_pal_thread_wait( libshared_pal_thread_handle_t thread_handle )
+ {
+ // TRD : thread_handle can be any value in its range
+
+ pthread_join( thread_handle, NULL );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBSHARED_PAL_THREAD_WAIT
+ #error More than one porting abstraction layer matches current platform in "libshared_porting_abstraction_layer_thread_wait.c".
+ #endif
+
+ #define LIBSHARED_PAL_THREAD_WAIT
+
+ void libshared_pal_thread_wait( libshared_pal_thread_handle_t thread_handle )
+ {
+ // TRD : thread_handle can be any value in its range
+
+ /* TRD : turns out this function does not exist in the linux kernel
+ you have to manage your own inter-thread sync
+ that breaks the lfds abstraction for thread start/wait
+ so this isn't going to get fixed in time for this release
+ leaving the function here so compilation will pass
+ */
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBSHARED_PAL_THREAD_WAIT )
+
+ #error No matching porting abstraction layer in "libshared_porting_abstraction_layer_thread_wait.c".
+
+#endif
+
--- /dev/null
+##### paths #####
+BINDIR := ../../bin
+INCDIR := ../../inc
+OBJDIR := ../../obj
+SRCDIR := ../../src
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+VERSION_NUMBER := 1
+MINOR_NUMBER := 0
+RELEASE_NUMBER := 0
+
+##### sources, objects and libraries #####
+BINNAME := libtest
+ARFILENAME := $(BINNAME).a
+ARPATHNAME := $(BINDIR)/$(ARFILENAME)
+SOBASENAME := $(BINNAME).so
+SONAME := $(SOBASENAME).$(VERSION_NUMBER)
+SOFILENAME := $(SONAME).$(MINOR_NUMBER).$(RELEASE_NUMBER)
+SOPATHNAME := $(BINDIR)/$(SOFILENAME)
+INCNAME := $(INCDIR)/$(BINNAME).h
+SRCDIRS := libtest_misc libtest_porting_abstraction_layer libtest_results libtest_test libtest_tests libtest_testsuite libtest_threadset
+SOURCES := libtest_misc_determine_erg.c libtest_misc_globals.c libtest_misc_memory_helpers.c libtest_misc_pal_helpers.c libtest_misc_query.c \
+ libtest_porting_abstraction_layer_free.c libtest_porting_abstraction_layer_get_full_logical_processor_set.c libtest_porting_abstraction_layer_malloc.c \
+ libtest_results_cleanup.c libtest_results_get_result.c libtest_results_init.c libtest_results_put_result.c \
+ libtest_test_cleanup.c libtest_test_init.c libtest_test_run.c \
+ libtest_tests_btree_addonly_unbalanced_alignment.c libtest_tests_btree_addonly_unbalanced_random_adds_fail.c libtest_tests_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c libtest_tests_btree_addonly_unbalanced_random_adds_overwrite.c \
+ libtest_tests_freelist_alignment.c libtest_tests_freelist_ea_popping.c libtest_tests_freelist_ea_popping_and_pushing.c libtest_tests_freelist_ea_pushing.c libtest_tests_freelist_ea_rapid_popping_and_pushing.c libtest_tests_freelist_without_ea_popping.c libtest_tests_freelist_without_ea_popping_and_pushing.c libtest_tests_freelist_without_ea_pushing.c libtest_tests_freelist_without_ea_rapid_popping_and_pushing.c \
+ libtest_tests_hash_addonly_alignment.c libtest_tests_hash_addonly_iterate.c libtest_tests_hash_addonly_random_adds_fail.c libtest_tests_hash_addonly_fail_and_overwrite.c libtest_tests_hash_addonly_random_adds_overwrite.c \
+ libtest_tests_list_addonly_singlylinked_ordered_alignment.c libtest_tests_list_addonly_singlylinked_ordered_new_ordered.c libtest_tests_list_addonly_singlylinked_ordered_new_ordered_with_cursor.c \
+ libtest_tests_list_addonly_singlylinked_unordered_alignment.c libtest_tests_list_addonly_singlylinked_unordered_new_after.c libtest_tests_list_addonly_singlylinked_unordered_new_end.c libtest_tests_list_addonly_singlylinked_unordered_new_start.c \
+ libtest_tests_porting_abstraction_layer_atomic_add.c libtest_tests_porting_abstraction_layer_atomic_cas.c libtest_tests_porting_abstraction_layer_atomic_dwcas.c libtest_tests_porting_abstraction_layer_atomic_exchange.c \
+ libtest_tests_prng_alignment.c libtest_tests_prng_generate.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_alignment.c libtest_tests_queue_bounded_manyproducer_manyconsumer_count.c libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing.c libtest_tests_queue_bounded_manyproducer_manyconsumer_dequeuing.c libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c libtest_tests_queue_bounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_dequeuing.c libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing.c libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_alignment.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_dequeuing.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_with_malloc_and_dequeuing_with_free.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_ringbuffer_reading.c libtest_tests_ringbuffer_reading_and_writing.c libtest_tests_ringbuffer_writing.c \
+ libtest_tests_stack_alignment.c libtest_tests_stack_popping.c libtest_tests_stack_popping_and_pushing.c libtest_tests_stack_pushing.c libtest_tests_stack_rapid_popping_and_pushing.c \
+ libtest_testsuite_cleanup.c libtest_testsuite_init.c libtest_testsuite_run.c \
+ libtest_threadset_add.c libtest_threadset_cleanup.c libtest_threadset_init.c libtest_threadset_operations.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+DEPENDS := $(patsubst %.o,%.d,$(OBJECTS))
+SYSLIBS :=
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL := -std=gnu89
+
+CC := gcc
+CFLAGS_MANDATORY := -c -fno-strict-aliasing
+CFLAGS_OPTIONAL := -std=gnu89 -Wall -Werror -Wno-unknown-pragmas -Wno-uninitialized -Wno-unused-but-set-variable
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+AR := ar
+ARFLAGS :=
+ARFLAGS_MANDATORY := rcs
+ARFLAGS_OPTIONAL :=
+
+LD := gcc
+LDFLAGS_MANDATORY := -shared -Wl,-soname,$(SONAME) -o $(SOPATHNAME)
+LDFLAGS_OPTIONAL := -std=gnu89 -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -fPIC
+
+##### build variants #####
+ifeq ($(findstring so,$(MAKECMDGOALS)),so)
+ CFLAGS_MANDATORY += -fPIC
+endif
+
+# TRD : default to debug
+ifeq ($(MAKECMDGOALS),)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(ARPATHNAME) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(ARPATHNAME) $(OBJECTS)
+
+$(SOPATHNAME) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) -o $(SOPATHNAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SONAME)
+ @ln -fs $(SOFILENAME) $(BINDIR)/$(SOBASENAME)
+
+##### phony #####
+.PHONY : clean ar_cov ar_dbg ar_prof ar_rel ar_tsan ar_vanilla ar_install ar_uninstall so_cov so_dbg so_prof so_rel so_tsan so_vanilla so_install so_uninstall
+
+clean :
+ @rm -f $(BINDIR)/* $(OBJDIR)/*
+
+ar_cov : $(ARPATHNAME) # archive (.a), coverage
+ar_dbg : $(ARPATHNAME) # archive (.a), debug
+ar_prof : $(ARPATHNAME) # archive (.a), profiling
+ar_rel : $(ARPATHNAME) # archive (.a), release
+ar_tsan : $(ARPATHNAME) # archive (.a), thread sanitizer
+ar_vanilla : $(ARPATHNAME) # archive (.a), no specific-build arguments
+ar_install :
+ # TRD : leading backslash to use command rather than alias
+ # as many Linux distros have a built-in alias to force
+ # a prompt ("y/n?") on file overwrite - silent and
+ # unexpected interference which breaks a makefile
+ @mkdir -p $(INSLIBDIR)
+ @\cp $(ARPATHNAME) $(INSLIBDIR)
+ @mkdir -p $(INSINCDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+ar_uninstall :
+ @rm $(INSLIBDIR)/$(ARFILENAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+so_cov : $(SOPATHNAME) # shared (.so), coverage
+so_dbg : $(SOPATHNAME) # shared (.so), debug
+so_prof : $(SOPATHNAME) # shared (.so), profiling
+so_rel : $(SOPATHNAME) # shared (.so), release
+so_tsan : $(SOPATHNAME) # shared (.so), thread sanitizer
+so_vanilla : $(SOPATHNAME) # shared (.so), no specific-build arguments
+so_install :
+ @mkdir -p $(INSLIBDIR)
+ @\cp $(SOPATHNAME) $(INSLIBDIR)
+ @ldconfig -vn $(INSLIBDIR)
+ @ln -s $(SONAME) $(INSLIBDIR)/$(SOBASENAME)
+ @mkdir -p $(INSINCDIR)
+ @\cp -r $(INCDIR)/* $(INSINCDIR)
+so_uninstall :
+ @rm -f $(INSLIBDIR)/$(SOFILENAME)
+ @rm -f $(INSLIBDIR)/$(SOBASENAME)
+ @rm -f $(INSLIBDIR)/$(SONAME)
+ @rm -r $(INSINCDIR)/$(BINNAME)
+ @rm -r $(INSINCDIR)/$(BINNAME).h
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+lib-y :=
+
+lib-y += ../../src/libtest_misc/libtest_misc_determine_erg.o
+lib-y += ../../src/libtest_misc/libtest_misc_globals.o
+lib-y += ../../src/libtest_misc/libtest_misc_memory_helpers.o
+lib-y += ../../src/libtest_misc/libtest_misc_pal_helpers.o
+lib-y += ../../src/libtest_misc/libtest_misc_query.o
+
+lib-y += ../../src/libtest_porting_abstraction_layer/libtest_porting_abstraction_layer_free.o
+lib-y += ../../src/libtest_porting_abstraction_layer/libtest_porting_abstraction_layer_get_full_logical_processor_set.o
+lib-y += ../../src/libtest_porting_abstraction_layer/libtest_porting_abstraction_layer_malloc.o
+
+lib-y += ../../src/libtest_results/libtest_results_cleanup.o
+lib-y += ../../src/libtest_results/libtest_results_get_result.o
+lib-y += ../../src/libtest_results/libtest_results_init.o
+lib-y += ../../src/libtest_results/libtest_results_put_result.o
+
+lib-y += ../../src/libtest_test/libtest_test_cleanup.o
+lib-y += ../../src/libtest_test/libtest_test_init.o
+lib-y += ../../src/libtest_test/libtest_test_run.o
+
+lib-y += ../../src/libtest_tests/libtest_tests_btree_addonly_unbalanced_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_btree_addonly_unbalanced_random_adds_fail.o
+lib-y += ../../src/libtest_tests/libtest_tests_btree_addonly_unbalanced_random_adds_fail_and_overwrite.o
+lib-y += ../../src/libtest_tests/libtest_tests_btree_addonly_unbalanced_random_adds_overwrite.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_ea_popping.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_ea_popping_and_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_ea_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_ea_rapid_popping_and_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_without_ea_popping.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_without_ea_popping_and_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_without_ea_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_freelist_without_ea_rapid_popping_and_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_hash_addonly_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_hash_addonly_iterate.o
+lib-y += ../../src/libtest_tests/libtest_tests_hash_addonly_random_adds_fail.o
+lib-y += ../../src/libtest_tests/libtest_tests_hash_addonly_fail_and_overwrite.o
+lib-y += ../../src/libtest_tests/libtest_tests_hash_addonly_random_adds_overwrite.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_ordered_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_ordered_new_ordered.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_ordered_new_ordered_with_cursor.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_unordered_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_unordered_new_after.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_unordered_new_end.o
+lib-y += ../../src/libtest_tests/libtest_tests_list_addonly_singlylinked_unordered_new_start.o
+lib-y += ../../src/libtest_tests/libtest_tests_porting_abstraction_layer_atomic_add.o
+lib-y += ../../src/libtest_tests/libtest_tests_porting_abstraction_layer_atomic_cas.o
+lib-y += ../../src/libtest_tests/libtest_tests_porting_abstraction_layer_atomic_dwcas.o
+lib-y += ../../src/libtest_tests/libtest_tests_porting_abstraction_layer_atomic_exchange.o
+lib-y += ../../src/libtest_tests/libtest_tests_prng_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_prng_generate.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_manyproducer_manyconsumer_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_manyproducer_manyconsumer_count.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_manyproducer_manyconsumer_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_singleproducer_singleconsumer_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing_and_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_unbounded_manyproducer_manyconsumer_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_unbounded_manyproducer_manyconsumer_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_with_malloc_and_dequeuing_with_free.o
+lib-y += ../../src/libtest_tests/libtest_tests_queue_unbounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.o
+lib-y += ../../src/libtest_tests/libtest_tests_ringbuffer_reading.o
+lib-y += ../../src/libtest_tests/libtest_tests_ringbuffer_reading_and_writing.o
+lib-y += ../../src/libtest_tests/libtest_tests_ringbuffer_writing.o
+lib-y += ../../src/libtest_tests/libtest_tests_stack_alignment.o
+lib-y += ../../src/libtest_tests/libtest_tests_stack_popping.o
+lib-y += ../../src/libtest_tests/libtest_tests_stack_popping_and_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_stack_pushing.o
+lib-y += ../../src/libtest_tests/libtest_tests_stack_rapid_popping_and_pushing.o
+
+lib-y += ../../src/libtest_testsuite/libtest_testsuite_cleanup.o
+lib-y += ../../src/libtest_testsuite/libtest_testsuite_init.o
+lib-y += ../../src/libtest_testsuite/libtest_testsuite_run.o
+
+lib-y += ../../src/libtest_threadset/libtest_threadset_add.o
+lib-y += ../../src/libtest_threadset/libtest_threadset_cleanup.o
+lib-y += ../../src/libtest_threadset/libtest_threadset_init.o
+lib-y += ../../src/libtest_threadset/libtest_threadset_operations.o
+
+libs-y := ../../bin/
+
+ccflags-y := -I$(src)/../../inc
+ccflags-y += -I$(src)/../../inc/liblfds710
+ccflags-y += -DKERNEL_MODE
+ccflags-y += -DNDEBUG
+ccflags-y += -fno-strict-aliasing
+ccflags-y += -std=gnu89
+ccflags-y += -Wall
+ccflags-y += -Werror
+ccflags-y += -Wno-unknown-pragmas
+ccflags-y += -Wno-unused-but-set-variable
+ccflags-y += -Wno-uninitialized
+
--- /dev/null
+default:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD)
+
+clean:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) clean
+ find ../../src/ -name "*.o" -type f -delete
+
+help:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) help
+
+modules:
+ $(MAKE) -C /lib/modules/`uname -r`/build M=$(PWD) modules
+
+
--- /dev/null
+EXPORTS
+
+libtest_results_init = libtest_results_init
+libtest_results_cleanup = libtest_results_cleanup
+libtest_results_get_result = libtest_results_get_result
+
+libtest_testsuite_init = libtest_testsuite_init
+libtest_testsuite_cleanup = libtest_testsuite_cleanup
+libtest_testsuite_run = libtest_testsuite_run
+
+libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors = libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors
+libtest_misc_determine_erg = libtest_misc_determine_erg
+libtest_misc_query = libtest_misc_query
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+INCDIR := ..\..\inc
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := libtest
+LIB_BINARY := $(BINDIR)\$(BINNAME).lib
+DLL_BINARY := $(BINDIR)\$(BINNAME).dll
+SRCDIRS := libtest_misc libtest_porting_abstraction_layer libtest_results libtest_test libtest_tests libtest_testsuite libtest_threadset
+SOURCES := libtest_misc_determine_erg.c libtest_misc_globals.c libtest_misc_memory_helpers.c libtest_misc_pal_helpers.c libtest_misc_query.c \
+ libtest_porting_abstraction_layer_free.c libtest_porting_abstraction_layer_get_full_logical_processor_set.c libtest_porting_abstraction_layer_malloc.c \
+ libtest_results_cleanup.c libtest_results_get_result.c libtest_results_init.c libtest_results_put_result.c \
+ libtest_test_cleanup.c libtest_test_init.c libtest_test_run.c \
+ libtest_tests_btree_addonly_unbalanced_alignment.c libtest_tests_btree_addonly_unbalanced_random_adds_fail.c libtest_tests_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c libtest_tests_btree_addonly_unbalanced_random_adds_overwrite.c \
+ libtest_tests_freelist_alignment.c libtest_tests_freelist_ea_popping.c libtest_tests_freelist_ea_popping_and_pushing.c libtest_tests_freelist_ea_pushing.c libtest_tests_freelist_ea_rapid_popping_and_pushing.c libtest_tests_freelist_without_ea_popping.c libtest_tests_freelist_without_ea_popping_and_pushing.c libtest_tests_freelist_without_ea_pushing.c libtest_tests_freelist_without_ea_rapid_popping_and_pushing.c \
+ libtest_tests_hash_addonly_alignment.c libtest_tests_hash_addonly_iterate.c libtest_tests_hash_addonly_random_adds_fail.c libtest_tests_hash_addonly_fail_and_overwrite.c libtest_tests_hash_addonly_random_adds_overwrite.c \
+ libtest_tests_list_addonly_singlylinked_ordered_alignment.c libtest_tests_list_addonly_singlylinked_ordered_new_ordered.c libtest_tests_list_addonly_singlylinked_ordered_new_ordered_with_cursor.c \
+ libtest_tests_list_addonly_singlylinked_unordered_alignment.c libtest_tests_list_addonly_singlylinked_unordered_new_after.c libtest_tests_list_addonly_singlylinked_unordered_new_end.c libtest_tests_list_addonly_singlylinked_unordered_new_start.c \
+ libtest_tests_porting_abstraction_layer_atomic_add.c libtest_tests_porting_abstraction_layer_atomic_cas.c libtest_tests_porting_abstraction_layer_atomic_dwcas.c libtest_tests_porting_abstraction_layer_atomic_exchange.c \
+ libtest_tests_prng_alignment.c libtest_tests_prng_generate.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_alignment.c libtest_tests_queue_bounded_manyproducer_manyconsumer_count.c libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing.c libtest_tests_queue_bounded_manyproducer_manyconsumer_dequeuing.c libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c libtest_tests_queue_bounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_dequeuing.c libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing.c libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_alignment.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_dequeuing.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_with_malloc_and_dequeuing_with_free.c libtest_tests_queue_unbounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_ringbuffer_reading.c libtest_tests_ringbuffer_reading_and_writing.c libtest_tests_ringbuffer_writing.c \
+ libtest_tests_stack_alignment.c libtest_tests_stack_popping.c libtest_tests_stack_popping_and_pushing.c libtest_tests_stack_pushing.c libtest_tests_stack_rapid_popping_and_pushing.c \
+ libtest_testsuite_cleanup.c libtest_testsuite_init.c libtest_testsuite_run.c \
+ libtest_threadset_add.c libtest_threadset_cleanup.c libtest_threadset_init.c libtest_threadset_operations.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
+USRLIBS := ..\..\..\..\liblfds710\bin\liblfds710.lib ..\..\..\libshared\bin\libshared.lib
+
+##### default paths fix up #####
+INCDIRS := $(patsubst %,%;,$(INCDIR))
+INCLUDE += $(subst $(SPACE),,$(INCDIRS))
+
+##### tools #####
+CC := cl
+CFLAGS_MANDATORY := /c "/Fd$(BINDIR)\$(BINNAME).pdb" /wd4068
+CFLAGS_OPTIONAL := /DWIN32_LEAN_AND_MEAN /DUNICODE /D_UNICODE /nologo /W4 /WX
+CFLAGS_MANDATORY_DBG := /Od /Gm /Zi /D_DEBUG
+CFLAGS_MANDATORY_REL := /Ox /DNDEBUG
+
+AR := lib
+ARFLAGS :=
+ARFLAGS_MANDATORY := /subsystem:console
+ARFLAGS_OPTIONAL := /nologo /wx /verbose
+
+LD := link
+LDFLAGS_MANDATORY := /def:$(BINNAME).def /dll /nodefaultlib /subsystem:console
+LDFLAGS_OPTIONAL := /nologo /nxcompat /wx
+LDFLAGS_MANDATORY_DBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+LDFLAGS_MANDATORY_REL := /incremental:no
+
+##### variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug lib
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MT
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MDd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := msvcrtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MD
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := msvcrt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%;,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) "/Fo$@" $<
+
+##### explicit rules #####
+$(LIB_BINARY) : $(OBJECTS)
+ $(AR) $(ARFLAGS_OPTIONAL) $(ARFLAGS) $(ARFLAGS_MANDATORY) $(OBJECTS) /out:$(LIB_BINARY)
+
+$(DLL_BINARY) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(CLIB) $(SYSLIBS) $(USRLIBS) $(OBJECTS) /out:$(DLL_BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(BINDIR)\$(BINNAME).* $(OBJDIR)\*.obj $(QUIETLY)
+
+dllrel : $(DLL_BINARY)
+dlldbg : $(DLL_BINARY)
+
+librel : $(LIB_BINARY)
+libdbg : $(LIB_BINARY)
+
+##### notes #####
+# /wd4068 : turns off the "unknown pragma" warning
+
--- /dev/null
+DIRS = single_dir_for_windows_kernel
+
+
--- /dev/null
+#include "libtest_internal.h"
+
+
+
+
+
+/****************************************************************************/
+DRIVER_INITIALIZE DriverEntry;
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+NTSTATUS DriverEntry( struct _DRIVER_OBJECT *DriverObject, PUNICODE_STRING RegistryPath )
+{
+ return STATUS_SUCCESS;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+EXPORTS
+
+libtest_results_init = libtest_results_init
+libtest_results_cleanup = libtest_results_cleanup
+libtest_results_get_result = libtest_results_get_result
+
+libtest_testsuite_init = libtest_testsuite_init
+libtest_testsuite_cleanup = libtest_testsuite_cleanup
+libtest_testsuite_run = libtest_testsuite_run
+
+libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors = libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors
+libtest_misc_determine_erg = libtest_misc_determine_erg
+libtest_misc_query = libtest_misc_query
+
--- /dev/null
+The Windows kernel build environment is primitive and has a number
+of severe limitations; in particular, all source files must be in
+one directory and it is not possible to choose the output binary type
+(static or dynamic library) from the build command line; rather,
+a string has to be modified in a text file used by the build (!)
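+
+For what it's worth, the string in question is the TARGETTYPE line in
+the WDK "sources" file; the two "sources" files provided with this
+library differ along these lines:
+
+  TARGETTYPE = DRIVER_LIBRARY     (static library)
+  TARGETTYPE = EXPORT_DRIVER      (dynamic library / export driver)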
+
+To deal with these limitations, it is necessary to run a batch file
+prior to performing a Windows kernel build.
+
+There are two batch files, one for static library builds and the other
+for dynamic library builds.
+
+They are both idempotent; you can run them as often as you like and
+switch between them as often as you want. Whichever one you run will
+take you from whatever state you were previously in to the state you
+want to be in.
+
+Both batch files copy all the source files into a single directory,
+"/src/single_dir_for_windows_kernel/".
+
+The static library batch file will then copy "/sources.static" into
+"/src/single_dir_for_windows_kernel/", which will cause a static
+library to be built.
+
+The dynamic library batch file will then copy "/sources.dynamic" into
+"/src/single_dir_for_windows_kernel/", which will cause a dynamic
+library to be built. It will also copy "src/driver_entry.c" into
+"/src/single_dir_for_windows_kernel/", since the linker requires
+the DriverEntry function to exist for dynamic libraries, even
+though it's not used.
+
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\libtest_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_porting_abstraction_layer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_results\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_test\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_tests\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_testsuite\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_threadset\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\libtest_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y driver_entry_renamed_to_avoid_compiler_warning.c single_dir_for_windows_kernel\driver_entry.c 1>nul 2>nul
+copy /y sources.dynamic single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel dynamic library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+@echo off
+rmdir /q /s single_dir_for_windows_kernel 1>nul 2>nul
+mkdir single_dir_for_windows_kernel 1>nul 2>nul
+
+copy /y ..\..\src\libtest_misc\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_porting_abstraction_layer\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_results\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_test\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_tests\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_testsuite\* single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y ..\..\src\libtest_threadset\* single_dir_for_windows_kernel\ 1>nul 2>nul
+
+copy /y ..\..\src\libtest_internal.h single_dir_for_windows_kernel\ 1>nul 2>nul
+copy /y sources.static single_dir_for_windows_kernel\sources 1>nul 2>nul
+
+echo Windows kernel static library build directory structure created.
+echo (Note the effects of this batch file are idempotent).
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+DLLDEF = ../libtest.def
+TARGETNAME = libtest
+TARGETPATH = ../../../bin/
+TARGETTYPE = EXPORT_DRIVER
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = libtest_misc_determine_erg.c \
+ libtest_misc_globals.c \
+ libtest_misc_memory_helpers.c \
+ libtest_misc_pal_helpers.c \
+ libtest_misc_query.c \
+ libtest_porting_abstraction_layer_free.c \
+ libtest_porting_abstraction_layer_get_full_logical_processor_set.c \
+ libtest_porting_abstraction_layer_malloc.c \
+ libtest_results_cleanup.c \
+ libtest_results_get_result.c \
+ libtest_results_init.c \
+ libtest_results_put_result.c \
+ libtest_test_cleanup.c \
+ libtest_test_init.c \
+ libtest_test_run.c \
+ libtest_tests_btree_addonly_unbalanced_alignment.c \
+ libtest_tests_btree_addonly_unbalanced_random_adds_fail.c \
+ libtest_tests_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c \
+ libtest_tests_btree_addonly_unbalanced_random_adds_overwrite.c \
+ libtest_tests_freelist_alignment.c \
+ libtest_tests_freelist_ea_popping.c \
+ libtest_tests_freelist_ea_popping_and_pushing.c \
+ libtest_tests_freelist_ea_pushing.c \
+ libtest_tests_freelist_ea_rapid_popping_and_pushing.c \
+ libtest_tests_freelist_without_ea_popping.c \
+ libtest_tests_freelist_without_ea_popping_and_pushing.c \
+ libtest_tests_freelist_without_ea_pushing.c \
+ libtest_tests_freelist_without_ea_rapid_popping_and_pushing.c \
+ libtest_tests_hash_addonly_alignment.c \
+ libtest_tests_hash_addonly_iterate.c \
+ libtest_tests_hash_addonly_random_adds_fail.c \
+ libtest_tests_hash_addonly_fail_and_overwrite.c \
+ libtest_tests_hash_addonly_random_adds_overwrite.c \
+ libtest_tests_list_addonly_singlylinked_ordered_alignment.c \
+ libtest_tests_list_addonly_singlylinked_ordered_new_ordered.c \
+ libtest_tests_list_addonly_singlylinked_ordered_new_ordered_with_cursor.c \
+ libtest_tests_list_addonly_singlylinked_unordered_alignment.c \
+ libtest_tests_list_addonly_singlylinked_unordered_new_after.c \
+ libtest_tests_list_addonly_singlylinked_unordered_new_end.c \
+ libtest_tests_list_addonly_singlylinked_unordered_new_start.c \
+ libtest_tests_porting_abstraction_layer_atomic_add.c \
+ libtest_tests_porting_abstraction_layer_atomic_cas.c \
+ libtest_tests_porting_abstraction_layer_atomic_dwcas.c \
+ libtest_tests_porting_abstraction_layer_atomic_exchange.c \
+ libtest_tests_prng_alignment.c \
+ libtest_tests_prng_generate.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_alignment.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_count.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_dequeuing.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_dequeuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_alignment.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_with_malloc_and_dequeuing_with_free.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_ringbuffer_reading.c \
+ libtest_tests_ringbuffer_reading_and_writing.c \
+ libtest_tests_ringbuffer_writing.c \
+ libtest_tests_stack_alignment.c \
+ libtest_tests_stack_popping.c \
+ libtest_tests_stack_popping_and_pushing.c \
+ libtest_tests_stack_pushing.c \
+ libtest_tests_stack_rapid_popping_and_pushing.c \
+ libtest_testsuite_cleanup.c \
+ libtest_testsuite_init.c \
+ libtest_testsuite_run.c \
+ libtest_threadset_add.c \
+ libtest_threadset_cleanup.c \
+ libtest_threadset_init.c \
+ libtest_threadset_operations.c \
+ driver_entry.c
+
--- /dev/null
+MSC_WARNING_LEVEL = /WX /wd4127 /W4
+TARGETNAME = libtest
+TARGETPATH = ../../../bin/
+TARGETTYPE = DRIVER_LIBRARY
+UMTYPE = nt
+USER_C_FLAGS = /DKERNEL_MODE /DNDEBUG
+
+INCLUDES = ../../../inc/
+SOURCES = libtest_misc_determine_erg.c \
+ libtest_misc_globals.c \
+ libtest_misc_memory_helpers.c \
+ libtest_misc_pal_helpers.c \
+ libtest_misc_query.c \
+ libtest_porting_abstraction_layer_free.c \
+ libtest_porting_abstraction_layer_get_full_logical_processor_set.c \
+ libtest_porting_abstraction_layer_malloc.c \
+ libtest_results_cleanup.c \
+ libtest_results_get_result.c \
+ libtest_results_init.c \
+ libtest_results_put_result.c \
+ libtest_test_cleanup.c \
+ libtest_test_init.c \
+ libtest_test_run.c \
+ libtest_tests_btree_addonly_unbalanced_alignment.c \
+ libtest_tests_btree_addonly_unbalanced_random_adds_fail.c \
+ libtest_tests_btree_addonly_unbalanced_random_adds_fail_and_overwrite.c \
+ libtest_tests_btree_addonly_unbalanced_random_adds_overwrite.c \
+ libtest_tests_freelist_alignment.c \
+ libtest_tests_freelist_ea_popping.c \
+ libtest_tests_freelist_ea_popping_and_pushing.c \
+ libtest_tests_freelist_ea_pushing.c \
+ libtest_tests_freelist_ea_rapid_popping_and_pushing.c \
+ libtest_tests_freelist_without_ea_popping.c \
+ libtest_tests_freelist_without_ea_popping_and_pushing.c \
+ libtest_tests_freelist_without_ea_pushing.c \
+ libtest_tests_freelist_without_ea_rapid_popping_and_pushing.c \
+ libtest_tests_hash_addonly_alignment.c \
+ libtest_tests_hash_addonly_iterate.c \
+ libtest_tests_hash_addonly_random_adds_fail.c \
+ libtest_tests_hash_addonly_fail_and_overwrite.c \
+ libtest_tests_hash_addonly_random_adds_overwrite.c \
+ libtest_tests_list_addonly_singlylinked_ordered_alignment.c \
+ libtest_tests_list_addonly_singlylinked_ordered_new_ordered.c \
+ libtest_tests_list_addonly_singlylinked_ordered_new_ordered_with_cursor.c \
+ libtest_tests_list_addonly_singlylinked_unordered_alignment.c \
+ libtest_tests_list_addonly_singlylinked_unordered_new_after.c \
+ libtest_tests_list_addonly_singlylinked_unordered_new_end.c \
+ libtest_tests_list_addonly_singlylinked_unordered_new_start.c \
+ libtest_tests_porting_abstraction_layer_atomic_add.c \
+ libtest_tests_porting_abstraction_layer_atomic_cas.c \
+ libtest_tests_porting_abstraction_layer_atomic_dwcas.c \
+ libtest_tests_porting_abstraction_layer_atomic_exchange.c \
+ libtest_tests_prng_alignment.c \
+ libtest_tests_prng_generate.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_alignment.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_count.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_dequeuing.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_bounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_dequeuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing.c \
+ libtest_tests_queue_bounded_singleproducer_singleconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_alignment.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_and_dequeuing.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_enqueuing_with_malloc_and_dequeuing_with_free.c \
+ libtest_tests_queue_unbounded_manyproducer_manyconsumer_rapid_enqueuing_and_dequeuing.c \
+ libtest_tests_ringbuffer_reading.c \
+ libtest_tests_ringbuffer_reading_and_writing.c \
+ libtest_tests_ringbuffer_writing.c \
+ libtest_tests_stack_alignment.c \
+ libtest_tests_stack_popping.c \
+ libtest_tests_stack_popping_and_pushing.c \
+ libtest_tests_stack_pushing.c \
+ libtest_tests_stack_rapid_popping_and_pushing.c \
+ libtest_testsuite_cleanup.c \
+ libtest_testsuite_init.c \
+ libtest_testsuite_run.c \
+ libtest_threadset_add.c \
+ libtest_threadset_cleanup.c \
+ libtest_threadset_init.c \
+ libtest_threadset_operations.c
+
--- /dev/null
+#ifndef LIBTEST_H
+
+ /***** defines *****/
+ #define LIBTEST_H
+
+ /***** enums *****/
+
+ /***** porting includes *****/
+ #include "libtest/libtest_porting_abstraction_layer_operating_system.h"
+ #include "libtest/libtest_porting_abstraction_layer_compiler.h"
+
+ /***** external includes *****/
+ #include "../../../liblfds710/inc/liblfds710.h"
+ #include "../../libshared/inc/libshared.h"
+
+ /***** includes *****/
+ #include "libtest/libtest_porting_abstraction_layer.h"
+ #include "libtest/libtest_misc.h"
+ #include "libtest/libtest_tests.h"
+ #include "libtest/libtest_test.h" // TRD : test depends on tests
+ #include "libtest/libtest_results.h" // TRD : results depends on tests
+ #include "libtest/libtest_testsuite.h"
+ #include "libtest/libtest_threadset.h"
+
+#endif
+
--- /dev/null
+/***** defines *****/
+#define LIBTEST_MISC_VERSION_STRING "7.1.0"
+#define LIBTEST_MISC_VERSION_INTEGER 710
+
+#define LIBTEST_MISC_OFFSETOF( structure, member ) (lfds710_pal_uint_t) ( ( &( (structure *) NULL )->member ) )
+
+/***** enums *****/
+enum libtest_misc_determine_erg_result
+{
+ LIBTEST_MISC_DETERMINE_ERG_RESULT_SUCCESS,
+ LIBTEST_MISC_DETERMINE_ERG_RESULT_ONE_PHYSICAL_CORE,
+ LIBTEST_MISC_DETERMINE_ERG_RESULT_ONE_PHYSICAL_CORE_OR_NO_LLSC,
+ LIBTEST_MISC_DETERMINE_ERG_RESULT_NO_LLSC,
+ LIBTEST_MISC_DETERMINE_ERG_RESULT_NOT_SUPPORTED
+};
+
+enum libtest_misc_query
+{
+ LIBTEST_MISC_QUERY_GET_BUILD_AND_VERSION_STRING
+};
+
+/***** externs *****/
+extern char
+ *libtest_misc_global_validity_names[];
+
+/***** public prototypes *****/
+void *libtest_misc_aligned_malloc( lfds710_pal_uint_t size, lfds710_pal_uint_t align_in_bytes );
+void libtest_misc_aligned_free( void *memory );
+
+void libtest_misc_determine_erg( struct libshared_memory_state *ms,
+ lfds710_pal_uint_t (*count_array)[10],
+ enum libtest_misc_determine_erg_result *der,
+ lfds710_pal_uint_t *erg_length_in_bytes );
+
+void libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms,
+ lfds710_pal_uint_t logical_processor_number,
+ lfds710_pal_uint_t windows_processor_group_number );
+
+void libtest_misc_query( enum libtest_misc_query query_type,
+ void *query_input,
+ void *query_output );
+
--- /dev/null
+/***** defines *****/
+#define LIBTEST_PAL_SET_LOGICAL_PROCESSOR_NUMBER( libtest_logical_processor, logical_processor_number ) (libtest_logical_processor).logical_processor_number = logical_processor_number
+#define LIBTEST_PAL_SET_WINDOWS_PROCESSOR_GROUP_NUMBER( libtest_logical_processor, windows_processor_group_number ) (libtest_logical_processor).windows_processor_group_number = windows_processor_group_number
+
+/***** structs *****/
+struct libtest_logical_processor
+{
+ lfds710_pal_uint_t
+ logical_processor_number,
+ windows_processor_group_number;
+
+ struct lfds710_list_asu_element
+ lasue;
+};
+
+struct libtest_thread_state
+{
+ libshared_pal_thread_handle_t
+ thread_state;
+
+ libshared_pal_thread_return_t
+ (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_user_state );
+
+ struct libtest_logical_processor
+ lp;
+
+ void
+ *thread_user_state;
+};
+
+/***** public prototypes *****/
+void libtest_pal_get_full_logical_processor_set( struct lfds710_list_asu_state *lasus, struct libshared_memory_state *ms );
+
+void *libtest_pal_malloc( lfds710_pal_uint_t size );
+void libtest_pal_free( void *memory );
+
--- /dev/null
+/****************************************************************************/
+#if( defined __GNUC__ )
+
+ #ifdef LIBTEST_PAL_COMPILER
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_compiler.h".
+ #endif
+
+ #define LIBTEST_PAL_COMPILER
+
+ #if( defined __arm__ )
+ // TRD : lfds710_pal_uint_t destination, lfds710_pal_uint_t *source
+ #define LIBTEST_PAL_LOAD_LINKED( destination, source ) \
+ { \
+ __asm__ __volatile__ \
+ ( \
+ "ldrex %[alias_dst], [%[alias_src]];" \
+ : [alias_dst] "=r" (destination) \
+ : [alias_src] "r" (source) \
+ ); \
+ }
+
+ // TRD : lfds710_pal_uint_t *destination, lfds710_pal_uint_t source, lfds710_pal_uint_t stored_flag
+ #define LIBTEST_PAL_STORE_CONDITIONAL( destination, source, stored_flag ) \
+ { \
+ __asm__ __volatile__ \
+ ( \
+ "strex %[alias_sf], %[alias_src], [%[alias_dst]];" \
+ : "=m" (*destination), \
+ [alias_sf] "=&r" (stored_flag) \
+ : [alias_src] "r" (source), \
+ [alias_dst] "r" (destination) \
+ ); \
+ }
+ #endif
+
+#endif
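+
+/* TRD : usage sketch for LIBTEST_PAL_LOAD_LINKED and LIBTEST_PAL_STORE_CONDITIONAL
+         (illustrative only - the variable names are hypothetical, not libtest
+         symbols) - an LL/SC retry loop built from the two macros looks like this :
+
+           lfds710_pal_uint_t volatile shared_counter = 0;
+           lfds710_pal_uint_t value, stored_flag;
+
+           do
+           {
+             LIBTEST_PAL_LOAD_LINKED( value, &shared_counter );
+             value++;
+             LIBTEST_PAL_STORE_CONDITIONAL( &shared_counter, value, stored_flag );
+           }
+           while( stored_flag != 0 );    // TRD : strex writes 0 on success, 1 on failure
+*/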
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && defined _M_IX86 )
+
+ #define LIBTEST_PAL_PROCESSOR
+
+ // TRD : bloody x86 on MSVC...
+
+ #define LIBTEST_PAL_STDLIB_CALLBACK_CALLING_CONVENTION __cdecl
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBTEST_PAL_PROCESSOR )
+
+ #define LIBTEST_PAL_STDLIB_CALLBACK_CALLING_CONVENTION
+
+#endif
+
--- /dev/null
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && !defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBTEST_PAL_OPERATING_SYSTEM
+
+ #include <stdlib.h>
+ #include <time.h>
+ #include <windows.h>
+
+ #define LIBTEST_PAL_OS_STRING "Windows"
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && NTDDI_VERSION >= NTDDI_WINXP && defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBTEST_PAL_OPERATING_SYSTEM
+
+ #include <stdlib.h>
+ #include <time.h>
+ #include <wdm.h>
+
+ #define LIBTEST_PAL_OS_STRING "Windows"
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && !defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBTEST_PAL_OPERATING_SYSTEM
+
+ #define _GNU_SOURCE
+
+ #include <unistd.h>
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include <time.h>
+
+ #define LIBTEST_PAL_OS_STRING "Linux"
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_OPERATING_SYSTEM
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_operating_system.h".
+ #endif
+
+ #define LIBTEST_PAL_OPERATING_SYSTEM
+
+ #error libtest not quite yet ready for Linux kernel - it uses time() all over the place
+
+ #define LIBTEST_PAL_OS_STRING "Linux"
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBTEST_PAL_OPERATING_SYSTEM )
+
+ #error No matching porting abstraction layer in "libtest_porting_abstraction_layer_operating_system.h".
+
+#endif
+
--- /dev/null
+/***** defines *****/
+
+/***** enums *****/
+
+/***** structs *****/
+struct libtest_results_state
+{
+ enum lfds710_misc_validity
+ dvs[LIBTEST_TEST_ID_COUNT];
+};
+
+/***** public prototypes *****/
+void libtest_results_init( struct libtest_results_state *rs );
+void libtest_results_cleanup( struct libtest_results_state *rs );
+
+void libtest_results_put_result( struct libtest_results_state *rs,
+ enum libtest_test_id test_id,
+ enum lfds710_misc_validity result );
+void libtest_results_get_result( struct libtest_results_state *rs,
+ enum libtest_test_id test_id,
+ enum lfds710_misc_validity *result );
+
--- /dev/null
+/***** defines *****/
+
+/***** structs *****/
+struct libtest_test_state
+{
+ char
+ *name;
+
+ enum libtest_test_id
+ test_id;
+
+ void
+ (*test_function)( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+};
+
+/***** public prototypes *****/
+void libtest_test_init( struct libtest_test_state *ts,
+ char *name,
+ enum libtest_test_id test_id,
+ void (*test_function)(struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs) );
+
+void libtest_test_cleanup( struct libtest_test_state *ts );
+
+void libtest_test_run( struct libtest_test_state *ts,
+ struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms,
+ enum lfds710_misc_validity *dvs );
+
--- /dev/null
+/***** enums *****/
+enum libtest_test_id
+{
+ LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_ADD,
+ LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_CAS,
+ LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_DCAS,
+ LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_EXCHANGE,
+
+ LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_ALIGNMENT,
+ LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL,
+ LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL_AND_OVERWRITE,
+ LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_OVERWRITE,
+
+ LIBTEST_TEST_ID_FREELIST_ALIGNMENT,
+ LIBTEST_TEST_ID_FREELIST_EA_POPPING,
+ LIBTEST_TEST_ID_FREELIST_EA_POPPING_AND_PUSHING,
+ LIBTEST_TEST_ID_FREELIST_EA_PUSHING,
+ LIBTEST_TEST_ID_FREELIST_EA_RAPID_POPPING_AND_PUSHING,
+ LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING,
+ LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING_AND_PUSHING,
+ LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_PUSHING,
+ LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_RAPID_POPPING_AND_PUSHING,
+
+ LIBTEST_TEST_ID_HASH_ADDONLY_ALIGNMENT,
+ LIBTEST_TEST_ID_HASH_ADDONLY_FAIL_AND_OVERWRITE,
+ LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_FAIL,
+ LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_OVERWRITE,
+ LIBTEST_TEST_ID_HASH_ADDONLY_ITERATE,
+
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_ALIGNMENT,
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED,
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED_WITH_CURSOR,
+
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_ALIGNMENT,
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_START,
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_END,
+ LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_AFTER,
+
+ LIBTEST_TEST_ID_PRNG_ALIGNMENT,
+ LIBTEST_TEST_ID_PRNG_GENERATE,
+
+ LIBTEST_TEST_ID_QUEUE_UMM_ALIGNMENT,
+ LIBTEST_TEST_ID_QUEUE_UMM_DEQUEUING,
+ LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING,
+ LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_AND_DEQUEUING,
+ LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_WITH_MALLOC_AND_DEQUEUING_WITH_FREE,
+ LIBTEST_TEST_ID_QUEUE_UMM_RAPID_ENQUEUING_AND_DEQUEUING,
+
+ LIBTEST_TEST_ID_QUEUE_BMM_ALIGNMENT,
+ LIBTEST_TEST_ID_QUEUE_BMM_COUNT,
+ LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING,
+ LIBTEST_TEST_ID_QUEUE_BMM_DEQUEUING,
+ LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING_AND_DEQUEUING,
+ LIBTEST_TEST_ID_QUEUE_BMM_RAPID_ENQUEUING_AND_DEQUEUING,
+
+ LIBTEST_TEST_ID_QUEUE_BSS_DEQUEUING,
+ LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING,
+ LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING_AND_DEQUEUING,
+
+ LIBTEST_TEST_ID_RINGBUFFER_READING,
+ LIBTEST_TEST_ID_RINGBUFFER_WRITING,
+ LIBTEST_TEST_ID_RINGBUFFER_READING_AND_WRITING,
+
+ LIBTEST_TEST_ID_STACK_ALIGNMENT,
+ LIBTEST_TEST_ID_STACK_POPPING,
+ LIBTEST_TEST_ID_STACK_POPPING_AND_PUSHING,
+ LIBTEST_TEST_ID_STACK_PUSHING,
+ LIBTEST_TEST_ID_STACK_RAPID_POPPING_AND_PUSHING,
+
+ LIBTEST_TEST_ID_COUNT
+};
+
+/***** public prototypes *****/
+void libtest_tests_btree_au_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_btree_au_random_adds_fail_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_btree_au_random_adds_overwrite_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_btree_au_fail_and_overwrite_on_existing_key( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_freelist_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_ea_popping( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_ea_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_ea_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_ea_rapid_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_without_ea_popping( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_without_ea_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_without_ea_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_freelist_without_ea_rapid_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_hash_a_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_hash_a_fail_and_overwrite_on_existing_key( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_hash_a_random_adds_fail_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_hash_a_random_adds_overwrite_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_hash_a_iterate( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_list_aso_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_list_aso_new_ordered( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_list_aso_new_ordered_with_cursor( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_list_asu_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_list_asu_new_start( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_list_asu_new_after( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_list_asu_new_end( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_pal_atomic_add( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_pal_atomic_cas( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_pal_atomic_dwcas( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_pal_atomic_exchange( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_prng_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_prng_generate( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_queue_bmm_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bmm_count( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bmm_enqueuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bmm_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bmm_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bmm_rapid_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_queue_bss_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bss_enqueuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_bss_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_queue_umm_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_umm_enqueuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_umm_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_umm_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_umm_enqueuing_with_malloc_and_dequeuing_with_free( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_queue_umm_rapid_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_ringbuffer_reading( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_ringbuffer_writing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_ringbuffer_reading_and_writing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+void libtest_tests_stack_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_stack_popping( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_stack_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_stack_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+void libtest_tests_stack_rapid_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs );
+
+
--- /dev/null
+/***** defines *****/
+
+/***** enums *****/
+
+/***** structs *****/
+struct libtest_testsuite_state
+{
+ enum flag
+ test_available_flag[LIBTEST_TEST_ID_COUNT];
+
+ struct lfds710_list_asu_state
+ list_of_logical_processors;
+
+ struct libshared_memory_state
+ *ms;
+
+ struct libtest_test_state
+ tests[LIBTEST_TEST_ID_COUNT];
+
+ void
+ (*callback_test_start)( char *test_name ),
+ (*callback_test_finish)( char *result );
+};
+
+/***** public prototypes *****/
+void libtest_testsuite_init( struct libtest_testsuite_state *ts,
+ struct libshared_memory_state *ms,
+ void (*callback_test_start)(char *test_name),
+ void (*callback_test_finish)(char *result) );
+void libtest_testsuite_cleanup( struct libtest_testsuite_state *ts );
+
+void libtest_testsuite_run( struct libtest_testsuite_state *ts,
+ struct libtest_results_state *rs );
+
--- /dev/null
+/***** defines *****/
+#define LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( libtest_threadset_per_thread_state ) (libtest_threadset_per_thread_state).user_state
+#define LIBTEST_THREADSET_SET_USER_STATE( libtest_threadtest_state, userstate ) (libtest_threadtest_state).user_state = userstate
+
+/***** structs *****/
+struct libtest_threadset_per_thread_state
+{
+ enum flag volatile
+ thread_ready_flag,
+ *threadset_start_flag;
+
+ libshared_pal_thread_handle_t
+ thread_handle;
+
+ struct lfds710_list_asu_element
+ lasue;
+
+ struct libshared_pal_thread_info
+ pti;
+
+ struct libtest_threadset_state
+ *ts;
+
+ void
+ *user_state;
+};
+
+struct libtest_threadset_state
+{
+ enum flag volatile
+ threadset_start_flag;
+
+ struct lfds710_list_asu_state
+ list_of_per_thread_states;
+
+ struct libshared_memory_state
+ *ms;
+
+ void
+ *user_state;
+};
+
+/***** prototypes *****/
+void libtest_threadset_init( struct libtest_threadset_state *ts,
+ void *user_state );
+void libtest_threadset_cleanup( struct libtest_threadset_state *ts );
+
+void libtest_threadset_add_thread( struct libtest_threadset_state *ts,
+ struct libtest_threadset_per_thread_state *pts,
+ struct libtest_logical_processor *lp,
+ libshared_pal_thread_return_t (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_user_state ),
+ void *user_state );
+
+void libtest_threadset_run( struct libtest_threadset_state *ts );
+void libtest_threadset_thread_ready_and_wait( struct libtest_threadset_per_thread_state *pts );
+
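+/* NOTE : illustrative usage sketch, not part of the build
+          an assumed thread lifecycle under the threadset API, following the pattern the tests in this tree use;
+          "thread_function", "pts", "lp" and "user_state" are hypothetical names
+
+          static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_function( void *libtest_threadset_per_thread_state )
+          {
+            struct libtest_threadset_per_thread_state
+              *pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+            void
+              *user_state = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+            libtest_threadset_thread_ready_and_wait( pts );   // block until libtest_threadset_run() releases every thread at once
+
+            // ... the actual per-thread work goes here ...
+
+            return (libshared_pal_thread_return_t) 0;
+          }
+
+          // in the controlling thread : init, add one thread per logical processor, run (which joins), cleanup
+          libtest_threadset_init( &ts, NULL );
+          libtest_threadset_add_thread( &ts, &pts[0], lp, thread_function, user_state );
+          libtest_threadset_run( &ts );
+          libtest_threadset_cleanup( &ts );
+*/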
--- /dev/null
+/***** public prototypes *****/
+#include "../inc/libtest.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define NO_FLAGS 0x0
+
+#define RETURN_SUCCESS 0
+#define RETURN_FAILURE 1
+
+#define BITS_PER_BYTE 8
+
+#define LIBTEST_VERSION_STRING "7.1.0"
+#define LIBTEST_VERSION_INTEGER 710
+
+#if( defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && defined PROF )
+ #define BUILD_TYPE_STRING "profiling"
+#endif
+
+/***** library-wide prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_misc_internal.h"
+
+/***** defines *****/
+#define MAX_ARM_ERG_LENGTH_IN_BYTES 2048
+
+
+
+/****************************************************************************/
+#if( defined LIBTEST_PAL_LOAD_LINKED && defined LIBTEST_PAL_STORE_CONDITIONAL )
+
+ /***** structs *****/
+ struct erg_director_state
+ {
+ enum flag volatile
+ quit_flag;
+
+ lfds710_pal_uint_t
+ (*count_array)[10],
+ number_threads;
+
+ lfds710_pal_uint_t volatile
+ **ack_pointer_array,
+ (*memory_pointer)[ (MAX_ARM_ERG_LENGTH_IN_BYTES+sizeof(lfds710_pal_uint_t)) / sizeof(lfds710_pal_uint_t)],
+ *write_pointer;
+ };
+
+ struct erg_helper_state
+ {
+ enum flag volatile
+ *quit_flag;
+
+ lfds710_pal_uint_t volatile
+ **ack_pointer,
+ **write_pointer;
+
+ lfds710_pal_uint_t
+ thread_number;
+ };
+
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_erg_director( void *libtest_threadset_per_thread_state );
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_erg_helper( void *libtest_threadset_per_thread_state );
+
+ /****************************************************************************/
+ void libtest_misc_determine_erg( struct libshared_memory_state *ms, lfds710_pal_uint_t (*count_array)[10], enum libtest_misc_determine_erg_result *der, lfds710_pal_uint_t *erg_length_in_bytes )
+ {
+ lfds710_pal_uint_t
+ erg_size = 10,
+ loop = 0,
+ number_logical_processors;
+
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN( MAX_ARM_ERG_LENGTH_IN_BYTES )
+ memory[ (MAX_ARM_ERG_LENGTH_IN_BYTES+sizeof(lfds710_pal_uint_t)) / sizeof(lfds710_pal_uint_t)];
+
+ struct erg_director_state
+ *eds;
+
+ struct erg_helper_state
+ *ehs_array;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_list_asu_state
+ list_of_logical_processors;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( count_array != NULL );
+ LFDS710_PAL_ASSERT( der != NULL );
+ LFDS710_PAL_ASSERT( erg_length_in_bytes != NULL );
+
+ /* TRD : ARM chips have a local and a global monitor
+ the local monitor has few guarantees and so you can't figure out ERG from it
+ because you can write even directly TO the LL/SC target and it thinks things are fine
+ so we have to hit the global monitor, which means running threads
+ in test, we know nothing about topology, so we just have to run one thread on each logical core
+ and get them to perform our necessary test memory writes
+
+ the code itself works by having a buffer of 2048 bytes, aligned at 2048 bytes - so it will
+ be aligned with an ERG
+ we LL/SC always on the first word
+ between the LL and SC, we get the other threads to perform a memory write into the buffer
+ the location of the write is the last word in each progressively larger ERG size, i.e.
+ ERG can be 8, 16, 32, 64, etc, so we LL location 0, then write to 1, 3, 7, 15, etc
+ we can then see which ERG sizes work and which fail
+ */
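+    /* NOTE : illustrative example, numbers hypothetical
+              with a 64 byte ERG, helper writes at offsets 4, 8, 16 and 32 land inside the
+              reserved granule, so the store-conditional on word 0 fails and those counters
+              stay at zero, while writes at 64 and above land outside it and leave the
+              reservation intact, so their counters accumulate successes; the smallest
+              counter index with a non-zero count therefore identifies the ERG (64 here)
+    */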
+
+ for( loop = 0 ; loop < 10 ; loop++ )
+ (*count_array)[loop] = 0;
+
+ libtest_pal_get_full_logical_processor_set( &list_of_logical_processors, ms );
+
+ lfds710_list_asu_query( &list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ if( number_logical_processors == 1 )
+ {
+ *der = LIBTEST_MISC_DETERMINE_ERG_RESULT_ONE_PHYSICAL_CORE;
+ return;
+ }
+
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ ehs_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct erg_helper_state) * (number_logical_processors-1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ eds = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct erg_director_state), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ eds->ack_pointer_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t *) * (number_logical_processors-1), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ eds->quit_flag = LOWERED;
+ eds->count_array = count_array;
+ eds->number_threads = number_logical_processors - 1;
+ eds->write_pointer = NULL;
+ eds->memory_pointer = &memory;
+ for( loop = 0 ; loop < (number_logical_processors-1) ; loop++ )
+ eds->ack_pointer_array[loop] = NULL;
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( loop == number_logical_processors-1 )
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_erg_director, (void *) eds );
+ else
+ {
+ ehs_array[loop].quit_flag = &eds->quit_flag;
+ ehs_array[loop].thread_number = loop;
+ ehs_array[loop].ack_pointer = &eds->ack_pointer_array[loop];
+ ehs_array[loop].write_pointer = &eds->write_pointer;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_erg_helper, (void *) &ehs_array[loop] );
+ }
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+ lfds710_misc_force_store();
+
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+    // the smallest write offset which left the reservation intact gives the ERG
+    for( loop = 0 ; loop < 10 ; loop++ )
+      if( (*count_array)[loop] > 0 and erg_size == 10 )
+        erg_size = loop;
+
+ if( erg_size == 0 )
+ *der = LIBTEST_MISC_DETERMINE_ERG_RESULT_ONE_PHYSICAL_CORE_OR_NO_LLSC;
+
+ if( erg_size >= 1 and erg_size <= 9 )
+ {
+ *der = LIBTEST_MISC_DETERMINE_ERG_RESULT_SUCCESS;
+      *erg_length_in_bytes = 1UL << (erg_size+2);
+ }
+
+ if( erg_size == 10 )
+ *der = LIBTEST_MISC_DETERMINE_ERG_RESULT_NO_LLSC;
+
+ return;
+ }
+
+ /****************************************************************************/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_erg_director( void *libtest_threadset_per_thread_state )
+ {
+ lfds710_pal_uint_t
+ ack_count,
+ count_index,
+ erg_length_in_bytes,
+ loop,
+ register_memory_zero,
+ stored_flag,
+ subloop;
+
+ struct erg_director_state
+ *eds;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ eds = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < 1024 ; loop++ )
+ for( erg_length_in_bytes = MAX_ARM_ERG_LENGTH_IN_BYTES, count_index = 9 ; erg_length_in_bytes >= 4 ; erg_length_in_bytes /= 2, count_index-- )
+ {
+ LIBTEST_PAL_LOAD_LINKED( register_memory_zero, &(*eds->memory_pointer)[0] );
+
+ eds->write_pointer = &(*eds->memory_pointer)[erg_length_in_bytes / sizeof(lfds710_pal_uint_t)];
+
+ // TRD : wait for all threads to change their ack_pointer to the new write_pointer
+ do
+ {
+ ack_count = 0;
+
+ for( subloop = 0 ; subloop < eds->number_threads ; subloop++ )
+ if( eds->ack_pointer_array[subloop] == eds->write_pointer )
+ {
+ LFDS710_MISC_BARRIER_LOAD; // TRD : yes, really here!
+ ack_count++;
+ }
+ }
+ while( ack_count != eds->number_threads );
+
+ LIBTEST_PAL_STORE_CONDITIONAL( &(*eds->memory_pointer)[0], register_memory_zero, stored_flag );
+
+ if( stored_flag == 0 )
+ (*eds->count_array)[count_index]++;
+ }
+
+ eds->quit_flag = RAISED;
+
+ LFDS710_MISC_BARRIER_STORE;
+ lfds710_misc_force_store();
+
+ return (libshared_pal_thread_return_t) RETURN_SUCCESS;
+ }
+
+ /****************************************************************************/
+ static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_erg_helper( void *libtest_threadset_per_thread_state )
+ {
+ struct erg_helper_state
+ *ehs;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ ehs = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( *ehs->quit_flag == LOWERED )
+ {
+ if( *ehs->write_pointer != NULL )
+ **ehs->write_pointer = ehs->thread_number; // TRD : can be any value though - thread_number just seems nice
+ LFDS710_MISC_BARRIER_STORE;
+ *ehs->ack_pointer = *ehs->write_pointer;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+ lfds710_misc_force_store();
+
+ return (libshared_pal_thread_return_t) RETURN_SUCCESS;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+#if( !defined LIBTEST_PAL_LOAD_LINKED || !defined LIBTEST_PAL_STORE_CONDITIONAL )
+
+ void libtest_misc_determine_erg( struct libshared_memory_state *ms, lfds710_pal_uint_t (*count_array)[10], enum libtest_misc_determine_erg_result *der, lfds710_pal_uint_t *erg_length_in_bytes )
+ {
+ lfds710_pal_uint_t
+ loop;
+
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( count_array != NULL );
+ LFDS710_PAL_ASSERT( der != NULL );
+ LFDS710_PAL_ASSERT( erg_length_in_bytes != NULL );
+
+ for( loop = 0 ; loop < 10 ; loop++ )
+ (*count_array)[loop] = 0;
+
+ *der = LIBTEST_MISC_DETERMINE_ERG_RESULT_NOT_SUPPORTED;
+
+ return;
+ }
+
+#endif
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+char
+ *libtest_misc_global_validity_names[] =
+ {
+ "unknown",
+ "passed",
+ "failed - loop detected",
+ "failed - missing elements",
+ "failed - additional elements",
+ "failed - invalid test output",
+ "failed - invalid test output (ordering)",
+ "failed - atomic failed",
+ "indeterminate - non-atomic passed",
+ };
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void *libtest_misc_aligned_malloc( lfds710_pal_uint_t size, lfds710_pal_uint_t align_in_bytes )
+{
+ lfds710_pal_uint_t
+ offset;
+
+ void
+ *memory,
+ *original_memory;
+
+ // TRD : size can be any value in its range
+ // TRD : align_in_bytes can be any value in its range
+
+ /* TRD : helper function to provide aligned allocations
+ no porting required
+ */
+
+ original_memory = memory = libtest_pal_malloc( size + sizeof(void *) + align_in_bytes );
+
+ if( memory != NULL )
+ {
+ memory = (void **) memory + 1;
+ offset = align_in_bytes - (lfds710_pal_uint_t) memory % align_in_bytes;
+ memory = (char unsigned *) memory + offset;
+ *( (void **) memory - 1 ) = original_memory;
+ }
+
+ return memory;
+}
+
+
+
+
+
+/****************************************************************************/
+void libtest_misc_aligned_free( void *memory )
+{
+ LFDS710_PAL_ASSERT( memory != NULL );
+
+ // TRD : the "void *" stored above memory points to the root of the allocation
+ libtest_pal_free( *( (void **) memory - 1 ) );
+
+ return;
+}
+
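+/* NOTE : illustrative usage sketch, not part of the build
+          assumed pairing of the two helpers above; the 128 byte alignment is a hypothetical value
+
+          struct test_element
+            *te;
+
+          te = libtest_misc_aligned_malloc( sizeof(struct test_element), 128 );   // returns NULL if libtest_pal_malloc() is a stub
+
+          if( te != NULL )
+          {
+            // ... use te, which is 128 byte aligned ...
+            libtest_misc_aligned_free( te );   // frees via the original pointer stored just below te
+          }
+*/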
--- /dev/null
+/***** includes *****/
+#include "libtest_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms,
+ lfds710_pal_uint_t logical_processor_number,
+ lfds710_pal_uint_t windows_processor_group_number )
+{
+ struct libtest_logical_processor
+ *lp;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : logical_processor_number can be any value in its range
+ // TRD : windows_processor_group_number can be any value in its range
+
+ lp = libshared_memory_alloc_from_most_free_space_node( ms, sizeof(struct libtest_logical_processor), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ LIBTEST_PAL_SET_LOGICAL_PROCESSOR_NUMBER( *lp, logical_processor_number );
+ LIBTEST_PAL_SET_WINDOWS_PROCESSOR_GROUP_NUMBER( *lp, windows_processor_group_number );
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( lp->lasue, lp );
+ lfds710_list_asu_insert_at_start( list_of_logical_processors, &lp->lasue );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_misc_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_misc_query( enum libtest_misc_query query_type, void *query_input, void *query_output )
+{
+ // TRD : query type can be any value in its range
+ // TRD : query_input can be NULL in some cases
+  // TRD : query_output can be NULL in some cases
+
+ switch( query_type )
+ {
+ case LIBTEST_MISC_QUERY_GET_BUILD_AND_VERSION_STRING:
+ {
+ char static const
+ * const build_and_version_string = "libtest " LIBTEST_MISC_VERSION_STRING " (" BUILD_TYPE_STRING ", " LIBTEST_PAL_OS_STRING ", " MODE_TYPE_STRING ")";
+
+ LFDS710_PAL_ASSERT( query_input == NULL );
+ LFDS710_PAL_ASSERT( query_output != NULL );
+
+ *(char const **) query_output = build_and_version_string;
+ }
+ break;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
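+/* NOTE : illustrative usage sketch, not part of the build
+          assumed use of the query above to obtain the build and version string
+
+          char const
+            *version_string;
+
+          libtest_misc_query( LIBTEST_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void *) &version_string );
+          // version_string now points at something like "libtest 7.1.0 (release, <OS>, user-mode)"
+*/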
--- /dev/null
+/***** includes *****/
+#include "libtest_porting_abstraction_layer_internal.h"
+
+/* TRD : libtest_pal_malloc() and libtest_pal_free() are used for and only for
+ one queue_umm test
+
+ if either is not implemented, the test will not run
+
+ that's the only impact of their presence or absence
+*/
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER )
+ /* TRD : MSVC compiler
+
+ an unfortunately necessary hack for MSVC
+ MSVC only defines __STDC__ if /Za is given, where /Za turns off MSVC C extensions -
+ which prevents Windows header files from compiling.
+ */
+
+ #define __STDC__ 1
+ #define __STDC_HOSTED__ 1
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __STDC__ && defined __STDC_HOSTED__ && __STDC_HOSTED__ == 1 && !defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_FREE
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_free.c".
+ #endif
+
+ #define LIBTEST_PAL_FREE
+
+ void libtest_pal_free( void *memory )
+ {
+ LFDS710_PAL_ASSERT( memory != NULL );
+
+ free( memory );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_FREE
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_free.c".
+ #endif
+
+ #define LIBTEST_PAL_FREE
+
+ void libtest_pal_free( void *memory )
+ {
+ LFDS710_PAL_ASSERT( memory != NULL );
+
+ ExFreePoolWithTag( memory, 'sdfl' );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_FREE
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_free.c".
+ #endif
+
+ #define LIBTEST_PAL_FREE
+
+ void libtest_pal_free( void *memory )
+ {
+ LFDS710_PAL_ASSERT( memory != NULL );
+
+ vfree( memory );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBTEST_PAL_FREE )
+
+ void libtest_pal_free( void *memory )
+ {
+ LFDS710_PAL_ASSERT( memory != NULL );
+
+ return;
+ }
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "libtest_porting_abstraction_layer_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WIN7 )
+
+ #ifdef LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_get_full_logical_processor_set.c".
+ #endif
+
+ #define LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void libtest_pal_get_full_logical_processor_set( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms )
+ {
+ BOOL
+ rv;
+
+ DWORD
+ offset = 0,
+ slpie_length = 0;
+
+ lfds710_pal_uint_t
+ bitmask,
+ logical_processor_number,
+ windows_processor_group_number;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
+ *slpie,
+ *slpie_buffer = NULL;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ lfds710_list_asu_init_valid_on_current_logical_core( list_of_logical_processors, NULL );
+
+ rv = GetLogicalProcessorInformationEx( RelationGroup, slpie_buffer, &slpie_length );
+ slpie_buffer = libshared_memory_alloc_from_most_free_space_node( ms, slpie_length, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ rv = GetLogicalProcessorInformationEx( RelationGroup, slpie_buffer, &slpie_length );
+
+ while( offset < slpie_length )
+ {
+ slpie = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *) ( (char unsigned *) slpie_buffer + offset );
+
+ offset += slpie->Size;
+
+ if( slpie->Relationship == RelationGroup )
+ for( windows_processor_group_number = 0 ; windows_processor_group_number < slpie->Group.ActiveGroupCount ; windows_processor_group_number++ )
+ for( logical_processor_number = 0 ; logical_processor_number < sizeof(KAFFINITY) * BITS_PER_BYTE ; logical_processor_number++ )
+ {
+ bitmask = (lfds710_pal_uint_t) 1 << logical_processor_number;
+
+ // TRD : if we've found a processor for this group, add it to the list
+ if( slpie->Group.GroupInfo[windows_processor_group_number].ActiveProcessorMask & bitmask )
+ libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( list_of_logical_processors, ms, logical_processor_number, windows_processor_group_number );
+ }
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && !defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 )
+
+ #ifdef LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_get_full_logical_processor_set.c".
+ #endif
+
+ #define LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void libtest_pal_get_full_logical_processor_set( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms )
+ {
+    DWORD
+      slpi_length = 0;
+
+    lfds710_pal_uint_t
+      bitmask,
+      logical_processor_number,
+      number_slpi,
+      loop;
+
+    SYSTEM_LOGICAL_PROCESSOR_INFORMATION
+      *slpi = NULL;
+
+    LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+    LFDS710_PAL_ASSERT( ms != NULL );
+
+    lfds710_list_asu_init_valid_on_current_logical_core( list_of_logical_processors, NULL );
+
+    GetLogicalProcessorInformation( slpi, &slpi_length );
+    slpi = libshared_memory_alloc_from_most_free_space_node( ms, slpi_length, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+    GetLogicalProcessorInformation( slpi, &slpi_length );
+    number_slpi = slpi_length / sizeof(SYSTEM_LOGICAL_PROCESSOR_INFORMATION);
+
+    for( loop = 0 ; loop < number_slpi ; loop++ )
+      if( (slpi+loop)->Relationship == RelationProcessorCore )
+        for( logical_processor_number = 0 ; logical_processor_number < sizeof(ULONG_PTR) * BITS_PER_BYTE ; logical_processor_number++ )
+        {
+          bitmask = (lfds710_pal_uint_t) 1 << logical_processor_number;
+
+          // pre-Win7 there are no processor groups, so the group number is always 0
+          if( (slpi+loop)->ProcessorMask & bitmask )
+            libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( list_of_logical_processors, ms, logical_processor_number, 0 );
+        }
+
+    return;
+  }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WIN7 )
+
+ #ifdef LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_get_full_logical_processor_set.c".
+ #endif
+
+ #define LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void libtest_pal_get_full_logical_processor_set( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms )
+ {
+ lfds710_pal_uint_t
+ bitmask,
+ logical_processor_number,
+ windows_processor_group_number;
+
+ NTSTATUS
+ rv;
+
+ SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX
+ *slpie,
+ *slpie_buffer = NULL;
+
+ ULONG
+ offset = 0,
+ slpie_length = 0;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ lfds710_list_asu_init_valid_on_current_logical_core( list_of_logical_processors, NULL );
+
+ rv = KeQueryLogicalProcessorRelationship( NULL, RelationGroup, slpie_buffer, &slpie_length );
+ slpie_buffer = libshared_memory_alloc_from_most_free_space_node( ms, slpie_length, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ rv = KeQueryLogicalProcessorRelationship( NULL, RelationGroup, slpie_buffer, &slpie_length );
+
+ while( offset < slpie_length )
+ {
+ slpie = (SYSTEM_LOGICAL_PROCESSOR_INFORMATION_EX *) ( (char unsigned *) slpie_buffer + offset );
+
+ offset += slpie->Size;
+
+ if( slpie->Relationship == RelationGroup )
+ for( windows_processor_group_number = 0 ; windows_processor_group_number < slpie->Group.ActiveGroupCount ; windows_processor_group_number++ )
+ for( logical_processor_number = 0 ; logical_processor_number < sizeof(KAFFINITY) * BITS_PER_BYTE ; logical_processor_number++ )
+ {
+ bitmask = (lfds710_pal_uint_t) 1 << logical_processor_number;
+
+ // TRD : if we've found a processor for this group, add it to the list
+ if( slpie->Group.GroupInfo[windows_processor_group_number].ActiveProcessorMask & bitmask )
+ libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( list_of_logical_processors, ms, logical_processor_number, windows_processor_group_number );
+ }
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE && NTDDI_VERSION >= NTDDI_WINXP && NTDDI_VERSION < NTDDI_WIN7 )
+
+ #ifdef LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_get_full_logical_processor_set.c".
+ #endif
+
+ #define LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void libtest_pal_get_full_logical_processor_set( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms )
+ {
+ CCHAR
+ loop;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ /* TRD : in XP, KeNumberProcessors is a CCHAR indicating the number of processors
+ the docs say nothing about whether the actual logical processor numbers are contiguous or not...
+ ...which is absolutely normal for MS docs on anything to do with CPU topology - bloody useless
+ just to make the point about bloody useless, this same variable is only a CCHAR in XP
+ prior to XP, it is a pointer to a CCHAR, where that CCHAR holds the same data
+
+ jesus...*facepalm*
+ */
+
+ for( loop = 0 ; loop < KeNumberProcessors ; loop++ )
+ libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( list_of_logical_processors, ms, loop, 0 );
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined __STDC__ && __STDC_HOSTED__ == 1 )
+
+ #ifdef LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+ #error More than one porting abstraction layer matches current platform in "libtest_porting_abstraction_layer_get_full_logical_processor_set.c".
+ #endif
+
+ #define LIBTEST_PAL_GET_LOGICAL_CORE_IDS
+
+ void libtest_pal_get_full_logical_processor_set( struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms )
+ {
+ char
+ diskbuffer[BUFSIZ],
+ string[1024];
+
+ FILE
+ *diskfile;
+
+ int long long unsigned
+ logical_processor_number;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+
+ lfds710_list_asu_init_valid_on_current_logical_core( list_of_logical_processors, NULL );
+
+ diskfile = fopen( "/proc/cpuinfo", "r" );
+
+ if( diskfile != NULL )
+ {
+ setbuf( diskfile, diskbuffer );
+
+ while( NULL != fgets(string, 1024, diskfile) )
+ if( 1 == sscanf(string, "processor : %llu", &logical_processor_number) )
+ libtest_misc_pal_helper_add_logical_processor_to_list_of_logical_processors( list_of_logical_processors, ms, logical_processor_number, 0 );
+
+ fclose( diskfile );
+ }
+
+ return;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBTEST_PAL_GET_LOGICAL_CORE_IDS )
+
+ #error No matching porting abstraction layer in "libtest_porting_abstraction_layer_get_full_logical_processor_set.c".
+
+#endif
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_porting_abstraction_layer_internal.h"
+
+/* TRD : libtest_pal_malloc() and libtest_pal_free() are used for and only for
+ one queue_umm test
+
+ if either is not implemented, the test will not run
+
+ that's the only impact of their presence or absence
+*/
+
+
+
+
+
+/****************************************************************************/
+#if( defined _MSC_VER )
+
+ /* TRD : MSVC compiler
+
+ an unfortunately necessary hack for MSVC
+ MSVC only defines __STDC__ if /Za is given, where /Za turns off MSVC C extensions -
+ which prevents Windows header files from compiling.
+ */
+
+ #define __STDC__ 1
+ #define __STDC_HOSTED__ 1
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined __STDC__ && defined __STDC_HOSTED__ && __STDC_HOSTED__ == 1 && !defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_MALLOC
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_malloc.c".
+ #endif
+
+ #define LIBTEST_PAL_MALLOC
+
+ void *libtest_pal_malloc( lfds710_pal_uint_t size )
+ {
+ void
+ *rv;
+
+ // TRD : size can be any value in its range
+
+ rv = malloc( (size_t) size );
+
+ return rv;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( defined _WIN32 && defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_MALLOC
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_malloc.c".
+ #endif
+
+ #define LIBTEST_PAL_MALLOC
+
+ void *libtest_pal_malloc( lfds710_pal_uint_t size )
+ {
+ void
+ *rv;
+
+ // TRD : size can be any value in its range
+
+ /* TRD : it is assumed that if lock-free data structures are being used
+ it is because they will be accessed at DISPATCH_LEVEL
+ and so the hard coded memory type is NonPagedPool
+ */
+
+ rv = ExAllocatePoolWithTag( NonPagedPool, size, 'sdfl' );
+
+ return rv;
+ }
+
+#endif
+
+
+
+
+
+
+/****************************************************************************/
+#if( defined __linux__ && defined KERNEL_MODE )
+
+ #ifdef LIBTEST_PAL_MALLOC
+ #error More than one porting abstraction layer matches the current platform in "libtest_porting_abstraction_layer_malloc.c".
+ #endif
+
+ #define LIBTEST_PAL_MALLOC
+
+ void *libtest_pal_malloc( lfds710_pal_uint_t size )
+ {
+ void
+ *rv;
+
+ // TRD : size can be any value in its range
+
+ rv = vmalloc( (int long unsigned) size );
+
+ return rv;
+ }
+
+#endif
+
+
+
+
+
+/****************************************************************************/
+#if( !defined LIBTEST_PAL_MALLOC )
+
+ void *libtest_pal_malloc( lfds710_pal_uint_t size )
+ {
+ // TRD : size can be any value in its range
+
+ return NULL;
+ }
+
+#endif
+
--- /dev/null
+/***** includes *****/
+#include "libtest_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_results_cleanup( struct libtest_results_state *rs )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_results_get_result( struct libtest_results_state *rs, enum libtest_test_id test_id, enum lfds710_misc_validity *result )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : test_id can be any value in its range
+ LFDS710_PAL_ASSERT( result != NULL );
+
+ *result = rs->dvs[test_id];
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_results_init( struct libtest_results_state *rs )
+{
+ enum libtest_test_id
+ test_id;
+
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ for( test_id = 0 ; test_id < LIBTEST_TEST_ID_COUNT ; test_id++ )
+ rs->dvs[test_id] = LFDS710_MISC_VALIDITY_UNKNOWN;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_results_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_results_put_result( struct libtest_results_state *rs, enum libtest_test_id test_id, enum lfds710_misc_validity result )
+{
+ LFDS710_PAL_ASSERT( rs != NULL );
+ // TRD : test_id can be any value in its range
+ // TRD : result can be any value in its range
+
+ rs->dvs[test_id] = result;
+
+ return;
+}
+
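+/* NOTE : illustrative usage sketch, not part of the build
+          assumed round trip through the results API; the test id constant is hypothetical
+
+          enum lfds710_misc_validity
+            dvs;
+
+          struct libtest_results_state
+            rs;
+
+          libtest_results_init( &rs );                                                     // every slot starts as LFDS710_MISC_VALIDITY_UNKNOWN
+          libtest_results_put_result( &rs, LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING, LFDS710_MISC_VALIDITY_VALID );
+          libtest_results_get_result( &rs, LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING, &dvs );
+          libtest_results_cleanup( &rs );
+*/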
--- /dev/null
+/***** includes *****/
+#include "libtest_test_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_test_cleanup( struct libtest_test_state *ts )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+
+ // TRD : we do naaauuuutttthhiiiinnnn'
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_test_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_test_init( struct libtest_test_state *ts,
+ char *name,
+ enum libtest_test_id test_id,
+ void (*test_function)(struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs) )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( name != NULL );
+ // TRD : test_id can be any value in its range
+ LFDS710_PAL_ASSERT( test_function != NULL );
+
+ ts->name = name;
+ ts->test_id = test_id;
+ ts->test_function = test_function;
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_test_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_test_run( struct libtest_test_state *ts,
+ struct lfds710_list_asu_state *list_of_logical_processors,
+ struct libshared_memory_state *ms,
+ enum lfds710_misc_validity *dvs )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ ts->test_function( list_of_logical_processors, ms, dvs );
+
+ return;
+}
+
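+/* NOTE : illustrative usage sketch, not part of the build
+          assumed pairing of libtest_test_init() and libtest_test_run(), using the alignment test
+          which follows; the name string and the test id constant are hypothetical
+
+          enum lfds710_misc_validity
+            dvs;
+
+          struct libtest_test_state
+            ts;
+
+          libtest_test_init( &ts, "btree_au alignment", LIBTEST_TEST_ID_BTREE_AU_ALIGNMENT, libtest_tests_btree_au_alignment );
+          libtest_test_run( &ts, list_of_logical_processors, ms, &dvs );
+          libtest_test_cleanup( &ts );
+*/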
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warning for condition expressions being const
+
+void libtest_tests_btree_au_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_btree_au_element
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_btree_au_element,up) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_btree_au_element,left) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_btree_au_element,right) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_btree_au_element,value) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds710_btree_au_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_btree_au_state,root) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_btree_au_element
+ baue;
+
+ lfds710_pal_uint_t
+ key;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ insert_fail_count,
+ number_elements_per_thread;
+
+ struct lfds710_btree_au_state
+ *baus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_btree_au_random_adds_fail_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ actual_sum_insert_failure_count,
+ expected_sum_insert_failure_count,
+ index = 0,
+ *key_count_array,
+ loop = 0,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ random_value,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_btree_au_element
+ *baue = NULL;
+
+ struct lfds710_btree_au_state
+ baus;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ void
+ *key;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single btree_au
+ we generate 10k elements per thread (one per logical processor) in an array
+ we set a random number in each element, which is the key
+ the random numbers generated are from 0 to 5000, so we must have some duplicates
+ (we don't use value, so we always pass in a NULL for that when we insert)
+
+ each thread loops, adds those elements into the btree, and counts the total number of insert fails
+ (we don't count on a per value basis because of the performance hit - we'll be TLBing all the time)
+ this test has the btree_au set to fail on add, so duplicates should be eliminated
+
+ we then merge the per-thread arrays
+
+ we should find in the tree one of every value, and the sum of the counts of each value (beyond the
+ first value, which was inserted) in the merged arrays should equal the sum of the insert fails from
+ each thread
+
+ we check the count of unique values in the merged array and use that when calling the btree_au validation function
+
+ we in-order walk and check that what we have in the tree matches what we have in the merged array
+ and then check the fail counts
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ // TRD : need a counter array later
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element) + sizeof(lfds710_pal_uint_t), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+ key_count_array = (lfds710_pal_uint_t *) ( te_array + number_elements );
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ lfds710_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->baus = &baus;
+ (tpts+loop)->element_array = te_array + loop * number_elements_per_thread;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ (tpts+loop)->insert_fail_count = 0;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_adding, &tpts[loop] );
+
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ {
+ LFDS710_PRNG_GENERATE( ps, random_value );
+ ((tpts+loop)->element_array+subloop)->key = (lfds710_pal_uint_t) ( (number_elements/2) * ((double) random_value / (double) LFDS710_PRNG_MAX) );
+ }
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : now for validation
+ make an array equal to number_elements, set all to 0
+ iterate over every per-thread array, counting the number of each value into this array
+ so we can know how many elements ought to have failed to be inserted
+ as well as being able to work out the actual number of elements which should be present in the btree, for the btree validation call
+ */
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ *(key_count_array+loop) = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ ( *(key_count_array+( (tpts+loop)->element_array+subloop)->key) )++;
+
+ // TRD : first, btree validation function
+ vi.min_elements = number_elements;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) == 0 )
+ vi.min_elements--;
+
+ vi.max_elements = vi.min_elements;
+
+ lfds710_btree_au_query( &baus, LFDS710_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ /* TRD : now check the sum of per-thread insert failures
+ is what it should be, which is the sum of key_count_array,
+ but with every count minus one (for the single successful insert)
+ and where elements of 0 are ignored (i.e. do not have -1 applied)
+ */
+
+ expected_sum_insert_failure_count = 0;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ expected_sum_insert_failure_count += *(key_count_array+loop) - 1;
+
+ actual_sum_insert_failure_count = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ actual_sum_insert_failure_count += (tpts+loop)->insert_fail_count;
+
+ if( expected_sum_insert_failure_count != actual_sum_insert_failure_count )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ /* TRD : now compare the combined array against an in-order walk of the tree
+ ignoring array elements with the value 0, we should find an exact match
+ */
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ {
+ // TRD : in-order walk over btree_au and check key_count_array matches
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&baus, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ key = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ while( *(key_count_array+index) == 0 )
+ index++;
+
+ if( index++ != (lfds710_pal_uint_t) key )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+ }
+
+ lfds710_btree_au_cleanup( &baus, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return cr;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state )
+{
+ enum lfds710_btree_au_insert_result
+ alr;
+
+ lfds710_pal_uint_t
+ index = 0;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( index < tpts->number_elements_per_thread )
+ {
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( (tpts->element_array+index)->baue, (tpts->element_array+index)->key );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( (tpts->element_array+index)->baue, 0 );
+ alr = lfds710_btree_au_insert( tpts->baus, &(tpts->element_array+index)->baue, NULL );
+
+ if( alr == LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY )
+ tpts->insert_fail_count++;
+
+ index++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_tests_btree_au_fail_and_overwrite_on_existing_key( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum lfds710_btree_au_insert_result
+ alr;
+
+ struct lfds710_btree_au_element
+ baue_one,
+ baue_two,
+ *existing_baue;
+
+ struct lfds710_btree_au_state
+ baus;
+
+ void
+ *value;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : the random_adds tests with fail and overwrite don't (can't, not in a performant manner)
+ test that the fail and/or overwrite of user data has *actually* happened - they use the
+ return value from the link function call, rather than empirically observing the final
+ state of the tree
+
+ as such, we now have a couple of single threaded tests where we check that the user data
+ value really is being modified (or not modified, as the case may be)
+ */
+
+ // internal_display_test_name( "Fail and overwrite on existing key" );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ /* TRD : so, we make a tree which is fail on existing
+ add one element, with a known user data
+ we then try to add the same key again, with a different user data
+ the call should fail, and then we get the element by its key
+ and check its user data is unchanged
+ (and confirm the failed link returned the correct existing_baue)
+ that's the first test done
+ */
+
+ lfds710_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_FAIL, NULL );
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( baue_one, 0 );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_one, 1 );
+ alr = lfds710_btree_au_insert( &baus, &baue_one, NULL );
+
+ if( alr != LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( baue_two, 0 );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_two, 2 );
+ alr = lfds710_btree_au_insert( &baus, &baue_two, &existing_baue );
+
+ if( alr != LFDS710_BTREE_AU_INSERT_RESULT_FAILURE_EXISTING_KEY )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( existing_baue != &baue_one )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ value = LFDS710_BTREE_AU_GET_VALUE_FROM_ELEMENT( *existing_baue );
+
+ if( (void *) (lfds710_pal_uint_t) value != (void *) (lfds710_pal_uint_t) 1 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_btree_au_cleanup( &baus, NULL );
+
+ /* TRD : second test, make a tree which is overwrite on existing
+ add one element, with a known user data
+ we then try to add the same key again, with a different user data
+ the call should succeed, and then we get the element by its key
+ and check its user data is changed
+ (and confirm the failed link returned the correct existing_baue)
+ that's the second test done
+ */
+
+ lfds710_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_OVERWRITE, NULL );
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( baue_one, 0 );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_one, 1 );
+ alr = lfds710_btree_au_insert( &baus, &baue_one, NULL );
+
+ if( alr != LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( baue_two, 0 );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( baue_two, 2 );
+ alr = lfds710_btree_au_insert( &baus, &baue_two, NULL );
+
+ if( alr != LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_btree_au_cleanup( &baus, NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return cr;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_btree_au_element
+ baue;
+
+ lfds710_pal_uint_t
+ key;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ insert_existing_count,
+ number_elements_per_thread;
+
+ struct lfds710_btree_au_state
+ *baus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_btree_au_random_adds_overwrite_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ actual_sum_insert_existing_count,
+ expected_sum_insert_existing_count,
+ index = 0,
+ *key_count_array,
+ loop = 0,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ random_value,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_btree_au_element
+ *baue = NULL;
+
+ struct lfds710_btree_au_state
+ baus;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ void
+ *key;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single btree_au
+ we generate 10k elements per thread (one per logical processor) in an array
+ we set a random number in each element, which is the key
+ the random numbers generated are from 0 to 5000, so we must have some duplicates
+ (we don't use value, so we always pass in a NULL for that when we insert)
+
+ each thread loops, adds those elements into the btree, and counts the total number of insert fails
+ (we don't count on a per value basis because of the performance hit - we'll be TLBing all the time)
+ this test has the btree_au set to overwrite on add, so duplicates should be eliminated
+
+ we then merge the per-thread arrays
+
+ we should find in the tree one of every value, and the sum of the counts of each value (beyond the
+ first value, which was inserted) in the merged arrays should equal the sum of the existing_baues returned
+ from each thread when they inserted and found an existing element
+
+ we check the count of unique values in the merged array and use that when calling the btree_au validation function
+
+ we in-order walk and check that what we have in the tree matches what we have in the merged array
+ and then check the fail counts
+ */
+
+ // internal_display_test_name( "Random adds and walking (overwrite on existing key)" );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ // TRD : need a counter array later
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element) + sizeof(lfds710_pal_uint_t), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+ key_count_array = (lfds710_pal_uint_t *) ( te_array + number_elements );
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ lfds710_btree_au_init_valid_on_current_logical_core( &baus, key_compare_function, LFDS710_BTREE_AU_EXISTING_KEY_OVERWRITE, NULL );
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->baus = &baus;
+ (tpts+loop)->element_array = te_array + loop * number_elements_per_thread;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ (tpts+loop)->insert_existing_count = 0;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_adding, &tpts[loop] );
+
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ {
+ LFDS710_PRNG_GENERATE( ps, random_value );
+ ((tpts+loop)->element_array+subloop)->key = (lfds710_pal_uint_t) ( (number_elements/2) * ((double) random_value / (double) LFDS710_PRNG_MAX) );
+ }
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : now for validation
+ make an array equal to number_elements, set all to 0
+ iterate over every per-thread array, counting the number of each value into this array
+ so we can know how many elements ought to have failed to be inserted
+ as well as being able to work out the actual number of elements which should be present in the btree, for the btree validation call
+ */
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ *(key_count_array+loop) = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ ( *(key_count_array+( (tpts+loop)->element_array+subloop)->key) )++;
+
+ // TRD : first, btree validation function
+ vi.min_elements = number_elements;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) == 0 )
+ vi.min_elements--;
+
+ vi.max_elements = vi.min_elements;
+
+ lfds710_btree_au_query( &baus, LFDS710_BTREE_AU_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ /* TRD : now check the sum of per-thread insert failures
+ is what it should be, which is the sum of key_count_array,
+ but with every count minus one (for the single successful insert)
+ and where elements of 0 are ignored (i.e. do not have -1 applied)
+ */
+
+ expected_sum_insert_existing_count = 0;
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ expected_sum_insert_existing_count += *(key_count_array+loop) - 1;
+
+ actual_sum_insert_existing_count = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ actual_sum_insert_existing_count += (tpts+loop)->insert_existing_count;
+
+ if( expected_sum_insert_existing_count != actual_sum_insert_existing_count )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ /* TRD : now compare the combined array against an in-order walk of the tree
+ ignoring array elements with the value 0, we should find an exact match
+ */
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ {
+ // TRD : in-order walk over btree_au and check key_count_array matches
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_btree_au_get_by_absolute_position_and_then_by_relative_position(&baus, &baue, LFDS710_BTREE_AU_ABSOLUTE_POSITION_SMALLEST_IN_TREE, LFDS710_BTREE_AU_RELATIVE_POSITION_NEXT_LARGER_ELEMENT_IN_ENTIRE_TREE) )
+ {
+ key = LFDS710_BTREE_AU_GET_KEY_FROM_ELEMENT( *baue );
+
+ while( *(key_count_array+index) == 0 )
+ index++;
+
+ if( index++ != (lfds710_pal_uint_t) key )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+ }
+
+ lfds710_btree_au_cleanup( &baus, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
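+ // the keys in this test are integer values stored directly in the void pointers, so the pointer values themselves are compared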
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return cr;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state )
+{
+ enum lfds710_btree_au_insert_result
+ alr;
+
+ lfds710_pal_uint_t
+ index = 0;
+
+ struct lfds710_btree_au_element
+ *existing_baue;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
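+ // each thread inserts its pre-generated keys; a SUCCESS_OVERWRITE result means the key was already present, so we count it for the later validation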
+ while( index < tpts->number_elements_per_thread )
+ {
+ LFDS710_BTREE_AU_SET_KEY_IN_ELEMENT( (tpts->element_array+index)->baue, (tpts->element_array+index)->key );
+ LFDS710_BTREE_AU_SET_VALUE_IN_ELEMENT( (tpts->element_array+index)->baue, 0 );
+ alr = lfds710_btree_au_insert( tpts->baus, &(tpts->element_array+index)->baue, &existing_baue );
+
+ if( alr == LFDS710_BTREE_AU_INSERT_RESULT_SUCCESS_OVERWRITE )
+ tpts->insert_existing_count++;
+
+ index++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings for unreferenced formal parameters and constant conditional expressions
+
+void libtest_tests_freelist_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_freelist_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_freelist_state,top) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_freelist_state,elimination_array_size_in_elements) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_freelist_state
+ *fs;
+
+ struct lfds710_prng_st_state
+ psts;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ enum flag
+ popped_flag;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_ea_popping( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_elements,
+ number_elements_in_freelist,
+ number_logical_processors,
+ raised_count = 0,
+ random_value,
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors = 2,
+ temp_number_logical_processors;
+
+ struct lfds710_freelist_element * volatile
+ (*ea)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a freelist with as many elements as possible
+
+ the creation function runs in a single thread and creates
+ and pushes those elements onto the freelist
+
+ each element contains a void pointer to the container test element
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each test element has a flag which indicates it has been popped
+
+ the threads run till the source freelist is empty
+
+ we then check the test elements
+ every element should have been popped
+
+ then tidy up
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_prng_st_init( &psts, LFDS710_PRNG_SEED );
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
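+ // derive a power of two (minimum 2) from the logical core count; this is used below to size the elimination array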
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ea = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, ea, smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ (te_array+loop)->popped_flag = LOWERED;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds710_freelist_push( &fs, &(te_array+loop)->fe, &psts );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ tpts[loop].fs = &fs;
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &tpts[loop].psts, random_value );
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_popping, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : there is a chance, although tiny, that some elements remain in the elimination layer
+ each worker thread returns when a pop() fails, so they can return while elements remain in the EL
+ so now we're validating, we ask the freelist for a count
+ we then count the number of elements in the te_array which are RAISED and LOWERED
+ and the LOWERED count should equal the number of elements remaining in the freelist
+ we could go further and check they are the *same* elements, but this all needs rewriting...
+ */
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_GET_COUNT, NULL, &number_elements_in_freelist );
+
+ vi.min_elements = vi.max_elements = number_elements_in_freelist;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ // TRD : now we check each element has popped_flag set to RAISED
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( (te_array+loop)->popped_flag == RAISED )
+ raised_count++;
+
+ if( raised_count != number_elements - number_elements_in_freelist )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping( void *libtest_threadset_per_thread_state )
+{
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_element
+ *te;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
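+ // pop until the freelist is empty, marking each test element as popped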
+ while( lfds710_freelist_pop(tpts->fs, &fe, &tpts->psts) )
+ {
+ te = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+ te->popped_flag = RAISED;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element;
+
+struct test_per_thread_state
+{
+ struct lfds710_freelist_state
+ fs_thread_local,
+ *fs;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe,
+ thread_local_fe;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_ea_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ random_value,
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors = 2,
+ subloop,
+ temp_number_logical_processors;
+
+ struct lfds710_freelist_element * volatile
+ (**ea)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pushes 10000 elements then pops 10000 elements
+ the second thread pops 10000 elements then pushes 10000 elements
+ all pushes and pops go onto the single main freelist
+ with a per-thread local freelist to store the pops
+
+ after time is up, all threads push what they have remaining onto
+ the main freelist
+
+ we then validate the main freelist
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_prng_st_init( &psts, LFDS710_PRNG_SEED );
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
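+ // one elimination array for the main freelist, plus one for each thread-local freelist (two threads per logical core)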
+ ea = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_freelist_element **) * (number_logical_processors * 2 + 1), sizeof(struct lfds710_freelist_element *) );
+ for( loop = 0 ; loop < number_logical_processors * 2 + 1 ; loop++ )
+ ea[loop] = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / (number_logical_processors * 2);
+
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, ea[0], smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+
+ // TRD : half of all elements in the main freelist so the popping threads can start immediately
+ for( loop = 0 ; loop < number_elements_per_thread * number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->datum = loop;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds710_freelist_push( &fs, &(te_array+loop)->fe, &psts );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ // TRD : first set of threads (poppers)
+ (tpts+loop)->fs = &fs;
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &(tpts+loop)->psts, random_value );
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ lfds710_freelist_init_valid_on_current_logical_core( &(tpts+loop)->fs_thread_local, ea[loop+1], smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_popping_and_pushing_start_popping, &tpts[loop] );
+
+ // TRD : second set of threads (pushers - who need elements in their per-thread freelists)
+ (tpts+loop+number_logical_processors)->fs = &fs;
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &(tpts+loop+number_logical_processors)->psts, random_value );
+ (tpts+loop+number_logical_processors)->number_elements_per_thread = number_elements_per_thread;
+ lfds710_freelist_init_valid_on_current_logical_core( &(tpts+loop+number_logical_processors)->fs_thread_local, ea[loop+1+number_logical_processors], smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+ libtest_threadset_add_thread( &ts, &pts[loop+number_logical_processors], lp, thread_popping_and_pushing_start_pushing, &tpts[loop+number_logical_processors] );
+
+ for( subloop = number_elements_per_thread * (number_logical_processors + loop) ; subloop < number_elements_per_thread * (number_logical_processors + loop + 1) ; subloop++ )
+ {
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+subloop)->thread_local_fe, (te_array+subloop) );
+ lfds710_freelist_push( &(tpts+loop+number_logical_processors)->fs_thread_local, &(te_array+subloop)->thread_local_fe, &psts );
+ }
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors * 2;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ lfds710_freelist_cleanup( &(tpts+loop)->fs_thread_local, NULL );
+ lfds710_freelist_cleanup( &(tpts+loop+number_logical_processors)->fs_thread_local, NULL );
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ count;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ start_time = time( NULL );
+
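+ // pop a batch from the main freelist into our local freelist, then return it all, repeating until the test duration expires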
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ count = 0;
+
+ while( count < tpts->number_elements_per_thread )
+ if( lfds710_freelist_pop(tpts->fs, &fe, &tpts->psts) )
+ {
+ // TRD : we do nothing with the test data, so there's no GET or SET here
+ lfds710_freelist_push( &tpts->fs_thread_local, fe, &tpts->psts );
+ count++;
+ }
+
+ // TRD : return our local freelist to the main freelist
+ while( lfds710_freelist_pop(&tpts->fs_thread_local, &fe, &tpts->psts) )
+ lfds710_freelist_push( tpts->fs, fe, &tpts->psts );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ count;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ start_time = time( NULL );
+
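+ // drain our pre-filled local freelist into the main freelist, then pop a fresh batch back, repeating until the test duration expires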
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ // TRD : return our local freelist to the main freelist
+ while( lfds710_freelist_pop(&tpts->fs_thread_local, &fe, &tpts->psts) )
+ lfds710_freelist_push( tpts->fs, fe, &tpts->psts );
+
+ count = 0;
+
+ while( count < tpts->number_elements_per_thread )
+ if( lfds710_freelist_pop(tpts->fs, &fe, &tpts->psts) )
+ {
+ lfds710_freelist_push( &tpts->fs_thread_local, fe, &tpts->psts );
+ count++;
+ }
+ }
+
+ // TRD : now push whatever we have in our local freelist
+ while( lfds710_freelist_pop(&tpts->fs_thread_local, &fe, &tpts->psts) )
+ lfds710_freelist_push( tpts->fs, fe, &tpts->psts );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread,
+ thread_number;
+
+ struct lfds710_freelist_state
+ *fs;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ lfds710_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_ea_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ random_value,
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors = 2,
+ temp_number_logical_processors;
+
+ struct lfds710_freelist_element * volatile
+ (*ea)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create an empty freelist
+
+ we then create one thread per CPU, where each thread
+ pushes its share of the elements as quickly as possible to the freelist
+
+ the data pushed is a counter and a thread ID
+
+ the threads exit when the freelist is full
+
+ we then validate the freelist;
+
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals the number pushed per thread
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_prng_st_init( &psts, LFDS710_PRNG_SEED );
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ea = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ // TRD : the main freelist
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, ea, smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->fs = &fs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ (tpts+loop)->te_array = te_array + loop * number_elements_per_thread;
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &(tpts+loop)->psts, random_value );
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_pushing, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
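+ // fill in the per-element test data before signalling ready, so the loop after the barrier does nothing but push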
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ (tpts->te_array+loop)->thread_number = tpts->thread_number;
+ (tpts->te_array+loop)->datum = loop;
+ }
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (tpts->te_array+loop)->fe, tpts->te_array+loop );
+ lfds710_freelist_push( tpts->fs, &(tpts->te_array+loop)->fe, &tpts->psts );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_freelist_state
+ *fs;
+
+ struct lfds710_prng_st_state
+ psts;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *libshared_threads_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_ea_rapid_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ count,
+ ea_size_in_freelist_elements,
+ index = 0,
+ loop,
+ number_logical_processors,
+ random_value,
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors = 2,
+ temp_number_logical_processors;
+
+ struct lfds710_freelist_element * volatile
+ (*ea)[LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS];
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct lfds710_prng_st_state
+ psts;
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory clean up that we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/clean up that
+ they can tell the freelist is valid and don't leak memory
+ and here, this test is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but goes very quickly
+
+ what we do is create a small freelist and then run one thread
+ per CPU, where each thread simply pushes and then immediately
+ pops
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the freelist, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_prng_st_init( &psts, LFDS710_PRNG_SEED );
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ temp_number_logical_processors = number_logical_processors >> 2;
+ while( temp_number_logical_processors != 0 )
+ {
+ temp_number_logical_processors >>= 1;
+ smallest_power_of_two_larger_than_or_equal_to_number_logical_processors <<= 1;
+ }
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ ea = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_freelist_element *) * LFDS710_FREELIST_ELIMINATION_ARRAY_ELEMENT_SIZE_IN_FREELIST_ELEMENTS * smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, ea, smallest_power_of_two_larger_than_or_equal_to_number_logical_processors, NULL );
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_GET_ELIMINATION_ARRAY_EXTRA_ELEMENTS_IN_FREELIST_ELEMENTS, NULL, (void *) &ea_size_in_freelist_elements );
+
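+ // one element per thread, plus (presumably) enough extra to cover elements that can be held in the elimination array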
+ te_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_element) * (number_logical_processors + ea_size_in_freelist_elements), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ tpts[loop].fs = &fs;
+ LFDS710_PRNG_ST_GENERATE( psts, random_value );
+ LFDS710_PRNG_ST_MIXING_FUNCTION( random_value );
+ lfds710_prng_st_init( &tpts[loop].psts, random_value );
+ }
+
+ for( loop = 0 ; loop < (number_logical_processors + ea_size_in_freelist_elements) ; loop++ )
+ {
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( te_array[loop].fe, &te_array[loop] );
+ lfds710_freelist_push( &fs, &te_array[loop].fe, &psts );
+ }
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_GET_COUNT, NULL, (void *) &count );
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[index], lp, thread_rapid_popping_and_pushing, &tpts[index] );
+ index++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = (number_logical_processors + ea_size_in_freelist_elements);
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
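+ // pop/push as fast as possible; the wall clock is checked only every TIME_LOOP_COUNT iterations, keeping time() out of the hot loop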
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds710_freelist_pop( tpts->fs, &fe, &tpts->psts );
+ lfds710_freelist_push( tpts->fs, fe, &tpts->psts );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_freelist_state
+ *fs;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ enum flag
+ popped_flag;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_without_ea_popping( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_misc_validation_info
+ vi = { 0, 0 };
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a freelist with as many elements as possible
+
+ the creation function runs in a single thread and creates
+ and pushes those elements onto the freelist
+
+ each element contains a void pointer to the container test element
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each test element has a flag which indicates it has been popped
+
+ the threads run till the source freelist is empty
+
+ we then check the test elements
+ every element should have been popped
+
+ then tidy up
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ (te_array+loop)->popped_flag = LOWERED;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds710_freelist_push( &fs, &(te_array+loop)->fe, NULL );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ tpts[loop].fs = &fs;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_popping, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ // TRD : now we check each element has popped_flag set to RAISED
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( (te_array+loop)->popped_flag == LOWERED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping( void *libtest_threadset_per_thread_state )
+{
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_element
+ *te;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( lfds710_freelist_pop(tpts->fs, &fe, NULL) )
+ {
+ te = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+ te->popped_flag = RAISED;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element;
+
+struct test_per_thread_state
+{
+ struct lfds710_freelist_state
+ fs_thread_local,
+ *fs;
+
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe,
+ thread_local_fe;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_without_ea_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pushes 10000 elements then pops 10000 elements
+ the second thread pops 10000 elements then pushes 10000 elements
+ all pushes and pops go onto the single main freelist
+ with a per-thread local freelist to store the pops
+
+ after time is up, all threads push what they have remaining onto
+ the main freelist
+
+ we then validate the main freelist
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / (number_logical_processors * 2);
+
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+ // TRD : half of all elements in the main freelist so the popping threads can start immediately
+ for( loop = 0 ; loop < number_elements_per_thread * number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->datum = loop;
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+loop)->fe, te_array+loop );
+ lfds710_freelist_push( &fs, &(te_array+loop)->fe, NULL );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ // TRD : first set of threads (poppers)
+ (tpts+loop)->fs = &fs;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ lfds710_freelist_init_valid_on_current_logical_core( &(tpts+loop)->fs_thread_local, NULL, 0, NULL );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_popping_and_pushing_start_popping, &tpts[loop] );
+
+ // TRD : second set of threads (pushers - who need elements in their per-thread freelists)
+ (tpts+loop+number_logical_processors)->fs = &fs;
+ (tpts+loop+number_logical_processors)->number_elements_per_thread = number_elements_per_thread;
+ lfds710_freelist_init_valid_on_current_logical_core( &(tpts+loop+number_logical_processors)->fs_thread_local, NULL, 0, NULL );
+ libtest_threadset_add_thread( &ts, &pts[loop+number_logical_processors], lp, thread_popping_and_pushing_start_pushing, &tpts[loop+number_logical_processors] );
+
+ for( subloop = number_elements_per_thread * (number_logical_processors + loop) ; subloop < number_elements_per_thread * (number_logical_processors + loop + 1) ; subloop++ )
+ {
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (te_array+subloop)->thread_local_fe, (te_array+subloop) );
+ lfds710_freelist_push( &(tpts+loop+number_logical_processors)->fs_thread_local, &(te_array+subloop)->thread_local_fe, NULL );
+ }
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors * 2;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ lfds710_freelist_cleanup( &(tpts+loop)->fs_thread_local, NULL );
+ lfds710_freelist_cleanup( &(tpts+loop+number_logical_processors)->fs_thread_local, NULL );
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ count;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ count = 0;
+
+ while( count < tpts->number_elements_per_thread )
+ if( lfds710_freelist_pop(tpts->fs, &fe, NULL) )
+ {
+ lfds710_freelist_push( &tpts->fs_thread_local, fe, NULL );
+ count++;
+ }
+
+ // TRD : return our local freelist to the main freelist
+ while( lfds710_freelist_pop(&tpts->fs_thread_local, &fe, NULL) )
+ lfds710_freelist_push( tpts->fs, fe, NULL );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ count;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ // TRD : return our local freelist to the main freelist
+ while( lfds710_freelist_pop(&tpts->fs_thread_local, &fe, NULL) )
+ lfds710_freelist_push( tpts->fs, fe, NULL );
+
+ count = 0;
+
+ while( count < tpts->number_elements_per_thread )
+ if( lfds710_freelist_pop(tpts->fs, &fe, NULL) )
+ {
+ lfds710_freelist_push( &tpts->fs_thread_local, fe, NULL );
+ count++;
+ }
+ }
+
+ // TRD : now push whatever we have in our local freelist
+ while( lfds710_freelist_pop(&tpts->fs_thread_local, &fe, NULL) )
+ lfds710_freelist_push( tpts->fs, fe, NULL );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread,
+ thread_number;
+
+ struct lfds710_freelist_state
+ *fs;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ lfds710_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_without_ea_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te,
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create an empty freelist
+
+ we then create one thread per CPU, where each thread
+ pushes its share of the elements as quickly as possible to the freelist
+
+ the data pushed is a counter and a thread ID
+
+ the threads exit when the freelist is full
+
+ we then validate the freelist;
+
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals the number pushed per thread
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, sizeof(lfds710_pal_uint_t) );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ // TRD : the main freelist
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->fs = &fs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ (tpts+loop)->te_array = te_array + loop * number_elements_per_thread;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_pushing, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : no EA flush - it consists of the first elements
+ and flushing would break the order validation check below
+ */
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements_per_thread - 1;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ /* TRD : the elimination array will be populated with a random set of freelist elements
+ we then expect up to that many elements to be out of order
+ */
+
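+ // walk the freelist, checking that each thread's datum values appear in strictly decreasing order (the order they would have if popped straight off a stack)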
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_freelist_pop(&fs, &fe, NULL) )
+ {
+ te = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *fe );
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( te->datum > per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->datum < per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->datum == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]--;
+ }
+
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ (tpts->te_array+loop)->thread_number = tpts->thread_number;
+ (tpts->te_array+loop)->datum = loop;
+ }
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( (tpts->te_array+loop)->fe, tpts->te_array+loop );
+ lfds710_freelist_push( tpts->fs, &(tpts->te_array+loop)->fe, NULL );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_freelist_state
+ *fs;
+};
+
+struct test_element
+{
+ struct lfds710_freelist_element
+ fe;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *libshared_threads_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_freelist_without_ea_rapid_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_misc_validation_info
+ vi = { 0, 0 };
+
+ struct lfds710_freelist_state
+ fs;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory clean up that we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/clean up that
+ they can tell the freelist is valid and don't leak memory
+ and here, this test is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but goes very quickly
+
+ what we do is create a small freelist and then run one thread
+ per CPU, where each thread simply pushes and then immediately
+ pops
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the freelist, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ tpts[loop].fs = &fs;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( te_array[loop].fe, &te_array[loop] );
+ lfds710_freelist_push( &fs, &te_array[loop].fe, NULL );
+ }
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[index], lp, thread_rapid_popping_and_pushing, &tpts[index] );
+ index++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_logical_processors;
+
+ lfds710_freelist_query( &fs, LFDS710_FREELIST_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ lfds710_freelist_cleanup( &fs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct lfds710_freelist_element
+ *fe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds710_freelist_pop( tpts->fs, &fe, NULL );
+ lfds710_freelist_push( tpts->fs, fe, NULL );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
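+/* Illustrative sketch (not part of the test suite) : the single-threaded usage
+   pattern which this test exercises from many threads at once; it assumes the
+   usual LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT accessor and that
+   lfds710_freelist_pop() returns non-zero on success, neither of which is
+   shown in this file
+
+     struct lfds710_freelist_state fs;
+     struct lfds710_freelist_element fe, *popped_fe;
+     lfds710_pal_uint_t datum = 42;
+     void *value;
+
+     lfds710_freelist_init_valid_on_current_logical_core( &fs, NULL, 0, NULL );
+
+     LFDS710_FREELIST_SET_VALUE_IN_ELEMENT( fe, &datum );
+     lfds710_freelist_push( &fs, &fe, NULL );
+
+     if( lfds710_freelist_pop(&fs, &popped_fe, NULL) )
+       value = LFDS710_FREELIST_GET_VALUE_FROM_ELEMENT( *popped_fe );
+
+     lfds710_freelist_cleanup( &fs, NULL );
+*/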
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings C4100 (unreferenced formal parameter) and C4127 (conditional expression is constant)
+
+void libtest_tests_hash_a_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_hash_a_element
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_hash_a_element,value) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *key_in_tree );
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash );
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_tests_hash_a_fail_and_overwrite_on_existing_key( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum lfds710_hash_a_insert_result
+ apr;
+
+ struct lfds710_hash_a_element
+ hae_one,
+ hae_two;
+
+ struct lfds710_hash_a_state
+ has;
+
+ struct lfds710_btree_au_state
+ *baus;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ baus = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_btree_au_state) * 10, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ // TRD : fail on overwrite
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 0 );
+ apr = lfds710_hash_a_insert( &has, &hae_one, NULL );
+
+ if( apr != LFDS710_HASH_A_PUT_RESULT_SUCCESS )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 );
+ apr = lfds710_hash_a_insert( &has, &hae_two, NULL );
+
+ if( apr != LFDS710_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_hash_a_cleanup( &has, NULL );
+
+ // TRD : success on overwrite
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_OVERWRITE, NULL );
+
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hae_one, 1 );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hae_one, 1 );
+ apr = lfds710_hash_a_insert( &has, &hae_one, NULL );
+
+ if( apr != LFDS710_HASH_A_PUT_RESULT_SUCCESS )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( hae_two, 1 );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( hae_two, 1 );
+ apr = lfds710_hash_a_insert( &has, &hae_two, NULL );
+
+ if( apr != LFDS710_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_hash_a_cleanup( &has, NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *key_in_tree )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be any value in its range
+ // TRD : key_in_tree can be any value in its range
+
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) key_in_tree )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) key_in_tree )
+ cr = 1;
+
+ return cr;
+}
+
+
+
+
+
+/****************************************************************************/
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the address of key
+ */
+
+ LFDS710_HASH_A_HASH_FUNCTION( (void *) &key, sizeof(lfds710_pal_uint_t), *hash );
+
+ return;
+}
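+
+/* Note (illustrative, not part of the test suite) : LFDS710_HASH_A_HASH_FUNCTION
+   hashes a run of bytes starting at the address it is given; since in these tests
+   the void pointer itself *is* the key, we hash the bytes of the pointer (hence
+   &key), whereas a hypothetical key stored behind the pointer would be hashed via
+   the pointer itself, e.g.
+
+     LFDS710_HASH_A_HASH_FUNCTION( (void *) &key, sizeof(lfds710_pal_uint_t), *hash );  // pointer used as the key
+     LFDS710_HASH_A_HASH_FUNCTION( (void *) key, sizeof(struct user_key), *hash );      // hypothetical struct user_key behind the pointer
+*/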
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_btree_au_element
+ baue;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash );
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_tests_hash_a_iterate( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ *counter_array,
+ loop;
+
+ struct lfds710_hash_a_element
+ *hae;
+
+ struct lfds710_hash_a_iterate
+ hai;
+
+ struct lfds710_hash_a_state
+ has;
+
+ struct lfds710_hash_a_element
+ *element_array;
+
+ struct lfds710_btree_au_state
+ *baus,
+ *baus_thousand;
+
+ void
+ *value;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : single-threaded test
+ we create a single hash_a
+ we populate with 1000 elements
+ where the key and value are both the number of the element (e.g. 0 to 999)
+ we then allocate 1000 counters, init to 0
+ we then iterate
+ we increment each element as we see it in the iterate
+ if any are missing or seen more than once, problemo!
+
+ we do this once with a table of 10, to ensure each slot has (or almost certainly has) something in it
+ and then a second time with a table of 10000, to ensure some empty slots exist
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ counter_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * 1000, sizeof(lfds710_pal_uint_t) );
+ element_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_hash_a_element) * 1000, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ baus = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_btree_au_state) * 10, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ baus_thousand = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_btree_au_state) * 1000, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ // TRD : first time around
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus, 10, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( *(element_array+loop), loop );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( *(element_array+loop), loop );
+ lfds710_hash_a_insert( &has, element_array+loop, NULL );
+ }
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ *(counter_array+loop) = 0;
+
+ lfds710_hash_a_iterate_init( &has, &hai );
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_hash_a_iterate(&hai, &hae) )
+ {
+ value = LFDS710_HASH_A_GET_VALUE_FROM_ELEMENT( *hae );
+ ( *(counter_array + (lfds710_pal_uint_t) value) )++;
+ }
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ if( *(counter_array+loop) > 1 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( *(counter_array+loop) == 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ }
+
+ lfds710_hash_a_cleanup( &has, NULL );
+
+ // TRD : second time around
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ {
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus_thousand, 10000, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( *(element_array+loop), loop );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( *(element_array+loop), loop );
+ lfds710_hash_a_insert( &has, element_array+loop, NULL );
+ }
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ *(counter_array+loop) = 0;
+
+ lfds710_hash_a_iterate_init( &has, &hai );
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_hash_a_iterate(&hai, &hae) )
+ {
+ value = LFDS710_HASH_A_GET_VALUE_FROM_ELEMENT( *hae );
+ ( *(counter_array + (lfds710_pal_uint_t) value ) )++;
+ }
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ if( *(counter_array+loop) > 1 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( *(counter_array+loop) == 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ }
+
+ lfds710_hash_a_cleanup( &has, NULL );
+ }
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be NULL (i.e. 0)
+ // TRD : existing_key can be NULL (i.e. 0)
+
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) existing_key )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) existing_key )
+ cr = 1;
+
+ return cr;
+}
+
+
+
+
+
+/****************************************************************************/
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* the key data
+ so here we need to pass in the address of key
+ */
+
+ LFDS710_HASH_A_HASH_FUNCTION( (void *) &key, sizeof(lfds710_pal_uint_t), *hash );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_hash_a_element
+ hae;
+
+ lfds710_pal_uint_t
+ datum,
+ key;
+};
+
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds710_hash_a_state
+ *has;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state );
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_hash_a_random_adds_fail_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ offset,
+ temp,
+ value;
+
+ struct lfds710_hash_a_element
+ *hae;
+
+ struct lfds710_hash_a_state
+ has;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_btree_au_state
+ *baus;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *element_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single hash_a
+ we generate 100k elements per thread (with one thread per logical processor) in an array
+ each element is unique
+ we randomly sort the elements
+ then each thread loops, adds those elements into the hash_a
+ we check that each datum inserts okay - failure will occur on non-unique data, i.e. two identical keys
+ we should have no failures
+ we then call the hash_a validation function
+ then using the hash_a get() we check all the elements we added are present
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ baus = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_btree_au_state) * 1000, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements_total );
+
+ // TRD : for correct rounding, for later code
+ number_elements_per_thread = number_elements_total / number_logical_processors;
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus, 1000, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_FAIL, NULL );
+
+ // TRD : create an ordered list of unique numbers
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ (element_array+loop)->key = loop;
+ // TRD : + number_elements just to make it different to the key
+ (element_array+loop)->datum = loop + number_elements_total;
+ }
+
+ // TRD : now randomize them
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ LFDS710_PRNG_GENERATE( ps, offset );
+ offset %= number_elements_total;
+ temp = (element_array + offset)->key;
+ (element_array + offset)->key = (element_array + loop)->key;
+ (element_array + loop)->key = temp;
+ }
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->has = &has;
+ (tpts+loop)->element_array = element_array + number_elements_per_thread*loop;
+ (tpts+loop)->error_flag = LOWERED;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_adding, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ // TRD : now for validation
+ vi.min_elements = vi.max_elements = number_elements_total;
+ lfds710_hash_a_query( &has, LFDS710_HASH_A_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ /* TRD : now we attempt to lfds710_hash_a_get_by_key() for every element in number_array
+ any failure to find is an error
+ we also check we've obtained the correct element
+ */
+
+ for( loop = 0 ; *dvs == LFDS710_MISC_VALIDITY_VALID and loop < number_elements_total ; loop++ )
+ if( 0 == lfds710_hash_a_get_by_key(&has, NULL, NULL, (void *) (tpts->element_array+loop)->key, &hae) )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+ else
+ {
+ value = (lfds710_pal_uint_t) LFDS710_HASH_A_GET_VALUE_FROM_ELEMENT( *hae );
+ if( (tpts->element_array+loop)->datum != value )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ // TRD : just check error_flags weren't raised
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds710_hash_a_cleanup( &has, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state )
+{
+ enum lfds710_hash_a_insert_result
+ apr;
+
+ lfds710_pal_uint_t
+ index = 0;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( index < tpts->number_elements_per_thread )
+ {
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( (tpts->element_array+index)->hae, (tpts->element_array+index)->key );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( (tpts->element_array+index)->hae, (tpts->element_array+index)->datum );
+ apr = lfds710_hash_a_insert( tpts->has, &(tpts->element_array+index)->hae, NULL );
+
+ if( apr == LFDS710_HASH_A_PUT_RESULT_FAILURE_EXISTING_KEY )
+ tpts->error_flag = RAISED;
+
+ index++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be NULL (i.e. 0)
+ // TRD : existing_key can be NULL (i.e. 0)
+
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) existing_key )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) existing_key )
+ cr = 1;
+
+ return cr;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the address of key
+ */
+
+ LFDS710_HASH_A_HASH_FUNCTION( (void *) &key, sizeof(lfds710_pal_uint_t), *hash );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_hash_a_element
+ hae;
+
+ lfds710_pal_uint_t
+ key;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread,
+ overwrite_count;
+
+ struct lfds710_hash_a_state
+ *has;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state );
+static int key_compare_function( void const *new_key, void const *existing_key );
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash );
+static int LIBTEST_PAL_STDLIB_CALLBACK_CALLING_CONVENTION qsort_and_bsearch_key_compare_function( void const *e1, void const *e2 );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_hash_a_random_adds_overwrite_on_existing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ actual_sum_overwrite_existing_count,
+ expected_sum_overwrite_existing_count,
+ *key_count_array,
+ loop,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ random_value;
+
+ struct lfds710_hash_a_iterate
+ hai;
+
+ struct lfds710_hash_a_element
+ *hae;
+
+ struct lfds710_hash_a_state
+ has;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_btree_au_state
+ *baus;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *element_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ void
+ *key_pointer,
+ *key;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single hash_a
+ we generate n elements per thread
+ each element contains a key value, which is set to a random value
+ (we don't use value, so it's just set to 0)
+ the threads then run, putting
+ the threads count their number of overwrite hits
+ once the threads are done, then we
+ count the number of each key
+ from this we figure out the min/max element for hash_a validation, so we call validation
+ we check the sum of overwrites for each thread is what it should be
+ then using the hash_a get() we check all the elements we expect are present
+ and then we iterate over the hash_a
+ checking we see each key once
+ */
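+
+ /* Worked example (illustrative) of the overwrite arithmetic described above :
+ if the generated keys were { 3, 3, 7, 7, 7 }, two distinct keys end up in
+ the table, so min_elements = max_elements = 2, and the expected sum of the
+ per-thread overwrite counts is (2-1) + (3-1) = 3
+ */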
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ baus = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_btree_au_state) * 1000, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element) + sizeof(lfds710_pal_uint_t), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements_total );
+ key_count_array = (lfds710_pal_uint_t *) ( element_array + number_elements_total );
+
+ // TRD : per thread first, for correct rounding, for later code
+ number_elements_per_thread = number_elements_total / number_logical_processors;
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ lfds710_hash_a_init_valid_on_current_logical_core( &has, baus, 1000, key_compare_function, key_hash_function, LFDS710_HASH_A_EXISTING_KEY_OVERWRITE, NULL );
+
+ // TRD : create an ordered list of unique numbers
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ LFDS710_PRNG_GENERATE( ps, random_value );
+ (element_array+loop)->key = (lfds710_pal_uint_t) ( (number_elements_total/2) * ((double) random_value / (double) LFDS710_PRNG_MAX) );
+ }
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->has = &has;
+ (tpts+loop)->element_array = element_array + number_elements_per_thread*loop;
+ (tpts+loop)->overwrite_count = 0;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_adding, &tpts[loop] );
+ loop++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ // TRD : now for validation
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ *(key_count_array+loop) = 0;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ ( *(key_count_array + (element_array+loop)->key) )++;
+
+ vi.min_elements = number_elements_total;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) == 0 )
+ vi.min_elements--;
+
+ vi.max_elements = vi.min_elements;
+
+ lfds710_hash_a_query( &has, LFDS710_HASH_A_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ expected_sum_overwrite_existing_count = 0;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ expected_sum_overwrite_existing_count += *(key_count_array+loop) - 1;
+
+ actual_sum_overwrite_existing_count = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ actual_sum_overwrite_existing_count += (tpts+loop)->overwrite_count;
+
+ if( expected_sum_overwrite_existing_count != actual_sum_overwrite_existing_count )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : now loop over the expected array and check we can get() every element
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) > 0 )
+ {
+ rv = lfds710_hash_a_get_by_key( &has, NULL, NULL, (void *) loop, &hae );
+
+ if( rv != 1 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ /* TRD : now iterate, checking we find every element and no others
+ to do this in a timely manner, we need to qsort() the key values
+ and use bsearch() to check for items in the array
+ */
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ if( *(key_count_array+loop) != 0 )
+ *(key_count_array+loop) = loop;
+ else
+ *(key_count_array+loop) = 0;
+
+ qsort( key_count_array, number_elements_total, sizeof(lfds710_pal_uint_t), qsort_and_bsearch_key_compare_function );
+
+ lfds710_hash_a_iterate_init( &has, &hai );
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_hash_a_iterate(&hai, &hae) )
+ {
+ key = LFDS710_HASH_A_GET_KEY_FROM_ELEMENT( *hae );
+
+ key_pointer = bsearch( &key, key_count_array, number_elements_total, sizeof(lfds710_pal_uint_t), qsort_and_bsearch_key_compare_function );
+
+ if( key_pointer == NULL )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ // TRD : cleanup
+ lfds710_hash_a_cleanup( &has, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_adding( void *libtest_threadset_per_thread_state )
+{
+ enum lfds710_hash_a_insert_result
+ apr;
+
+ lfds710_pal_uint_t
+ index = 0;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( index < tpts->number_elements_per_thread )
+ {
+ LFDS710_HASH_A_SET_KEY_IN_ELEMENT( (tpts->element_array+index)->hae, (tpts->element_array+index)->key );
+ LFDS710_HASH_A_SET_VALUE_IN_ELEMENT( (tpts->element_array+index)->hae, 0 );
+ apr = lfds710_hash_a_insert( tpts->has, &(tpts->element_array+index)->hae, NULL );
+
+ if( apr == LFDS710_HASH_A_PUT_RESULT_SUCCESS_OVERWRITE )
+ tpts->overwrite_count++;
+
+ index++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int key_compare_function( void const *new_key, void const *existing_key )
+{
+ int
+ cr = 0;
+
+ // TRD : new_key can be NULL (i.e. 0)
+ // TRD : existing_key can be NULL (i.e. 0)
+
+ if( (lfds710_pal_uint_t) new_key < (lfds710_pal_uint_t) existing_key )
+ cr = -1;
+
+ if( (lfds710_pal_uint_t) new_key > (lfds710_pal_uint_t) existing_key )
+ cr = 1;
+
+ return cr;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static void key_hash_function( void const *key, lfds710_pal_uint_t *hash )
+{
+ // TRD : key can be NULL
+ LFDS710_PAL_ASSERT( hash != NULL );
+
+ *hash = 0;
+
+ /* TRD : this function iterates over the user data
+ and we are using the void pointer *as* key data
+ so here we need to pass in the address of key
+ */
+
+ LFDS710_HASH_A_HASH_FUNCTION( (void *) &key, sizeof(lfds710_pal_uint_t), *hash );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static int LIBTEST_PAL_STDLIB_CALLBACK_CALLING_CONVENTION qsort_and_bsearch_key_compare_function( void const *e1, void const *e2 )
+{
+ int
+ cr = 0;
+
+ lfds710_pal_uint_t
+ s1,
+ s2;
+
+ s1 = *(lfds710_pal_uint_t *) e1;
+ s2 = *(lfds710_pal_uint_t *) e2;
+
+ if( s1 > s2 )
+ cr = 1;
+
+ if( s1 < s2 )
+ cr = -1;
+
+ return cr;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** defines *****/
+#define TEST_DURATION_IN_SECONDS 5
+#define TIME_LOOP_COUNT 10000
+#define REDUCED_TIME_LOOP_COUNT 1000
+
+/***** private prototypes *****/
+
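+/* Illustrative sketch (not part of the header) : how the duration-based tests
+   use these defines - calling time() is comparatively expensive, so the hot
+   loop only samples the clock once every TIME_LOOP_COUNT iterations
+
+     lfds710_pal_uint_t time_loop = 0;
+     time_t current_time, start_time;
+
+     current_time = start_time = time( NULL );
+
+     while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+     {
+       // ...one lock-free operation per iteration...
+
+       if( time_loop++ == TIME_LOOP_COUNT )
+       {
+         time_loop = 0;
+         time( &current_time );
+       }
+     }
+*/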
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings C4100 (unreferenced formal parameter) and C4127 (conditional expression is constant)
+
+void libtest_tests_list_aso_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_list_aso_element
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_aso_element,next) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_aso_element,value) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds710_list_aso_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_aso_state,dummy_element) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_aso_state,start) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_list_aso_element
+ lasoe;
+
+ lfds710_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds710_list_aso_state
+ *lasos;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int new_ordered_compare_function( void const *value_new, void const *value_in_list );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_ordered_thread( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_list_aso_new_ordered( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ expected_element_number,
+ number_elements,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ offset,
+ temp;
+
+ struct lfds710_list_aso_element
+ *lasoe = NULL;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_list_aso_state
+ lasos;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : run one thread per logical processor
+ we have a single array of 10k elements per thread
+ this is set to be randomly ordered (but with contiguous numbers from 0 to n)
+ we give 10k to each thread (a pointer into the array at the correct point)
+ which then loops through that array
+ calling lfds710_list_aso_insert(), which places each element in key order
+ verification should show list is sorted
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ lfds710_list_aso_init_valid_on_current_logical_core( &lasos, new_ordered_compare_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+
+ /* TRD : create randomly ordered number array with unique elements
+
+ unique isn't necessary - the list will sort anyway - but
+ it permits slightly better validation
+ */
+
+ // TRD : or the test takes a looooooong time...
+ if( number_elements_per_thread > 10000 )
+ number_elements_per_thread = 10000;
+
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ (element_array+loop)->element_number = loop;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ LFDS710_PRNG_GENERATE( ps, offset );
+ offset %= number_elements_total;
+ temp = (element_array + offset)->element_number;
+ (element_array + offset)->element_number = (element_array + loop)->element_number;
+ (element_array + loop)->element_number = temp;
+ }
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->lasos = &lasos;
+ (tpts+loop)->element_array = element_array + (loop*number_elements_per_thread);
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, new_ordered_thread, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ /* TRD : validate the resultant list
+ iterate over the list
+ we expect to find the list is sorted,
+ which means that element_number will
+ increment from zero
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_total;
+
+ lfds710_list_aso_query( &lasos, LFDS710_LIST_ASO_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ {
+ expected_element_number = 0;
+
+ // TRD : traverse the list and check combined_data_array matches
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(lasos, lasoe) )
+ {
+ element = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ if( element->element_number != expected_element_number++ )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+ }
+
+ lfds710_list_aso_cleanup( &lasos, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int new_ordered_compare_function( void const *value_new, void const *value_in_list )
+{
+ int
+ cr = 0;
+
+ struct test_element
+ *e1,
+ *e2;
+
+ // TRD : value_new can be any value in its range
+ // TRD : value_in_list can be any value in its range
+
+ e1 = (struct test_element *) value_new;
+ e2 = (struct test_element *) value_in_list;
+
+ if( e1->element_number < e2->element_number )
+ cr = -1;
+
+ if( e1->element_number > e2->element_number )
+ cr = 1;
+
+ return cr;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_ordered_thread( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( (tpts->element_array+loop)->lasoe, tpts->element_array+loop );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( (tpts->element_array+loop)->lasoe, tpts->element_array+loop );
+ lfds710_list_aso_insert( tpts->lasos, &(tpts->element_array+loop)->lasoe, NULL );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
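+/* Illustrative sketch (not part of the test suite) : the single-threaded pattern
+   this test exercises from many threads at once - ordered insert followed by a
+   sorted traversal; it reuses the struct test_element and compare function
+   defined above
+
+     struct lfds710_list_aso_state lasos;
+     struct lfds710_list_aso_element *lasoe = NULL;
+     struct test_element te, *element;
+
+     lfds710_list_aso_init_valid_on_current_logical_core( &lasos, new_ordered_compare_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+
+     te.element_number = 1;
+     LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( te.lasoe, &te );
+     LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( te.lasoe, &te );
+     lfds710_list_aso_insert( &lasos, &te.lasoe, NULL );
+
+     while( LFDS710_LIST_ASO_GET_START_AND_THEN_NEXT(lasos, lasoe) )
+       element = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+     lfds710_list_aso_cleanup( &lasos, NULL );
+*/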
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_list_aso_element
+ lasoe;
+
+ lfds710_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds710_list_aso_state
+ *lasos;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static int new_ordered_with_cursor_compare_function( void const *value_new, void const *value_in_list );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_ordered_with_cursor_insert_thread( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_ordered_with_cursor_cursor_thread( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_list_aso_new_ordered_with_cursor( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_elements_total,
+ number_logical_processors,
+ offset,
+ temp;
+
+ struct lfds710_list_aso_state
+ lasos;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *element_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : run two threads per logical processor
+
+ the test runs for TEST_DURATION_IN_SECONDS seconds
+
+ the first thread loops over a pre-set list of random numbers
+ continually adding them using ordered insert
+
+ the second thread keeps iterating over the list, checking that
+ each element is larger than its predecessor
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ lfds710_list_aso_init_valid_on_current_logical_core( &lasos, new_ordered_with_cursor_compare_function, LFDS710_LIST_ASO_INSERT_RESULT_FAILURE_EXISTING_KEY, NULL );
+
+ /* TRD : create randomly ordered number array with unique elements
+
+ unique isn't necessary - the list will sort anyway - but
+ it permits slightly better validation
+ */
+
+ // TRD : or the test takes a looooooong time...
+ if( number_elements_per_thread > 1000 )
+ number_elements_per_thread = 1000;
+
+ number_elements_total = number_elements_per_thread * number_logical_processors;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ (element_array+loop)->element_number = loop;
+
+ for( loop = 0 ; loop < number_elements_total ; loop++ )
+ {
+ LFDS710_PRNG_GENERATE( ps, offset );
+ offset %= number_elements_total;
+ temp = (element_array + offset)->element_number;
+ (element_array + offset)->element_number = (element_array + loop)->element_number;
+ (element_array + loop)->element_number = temp;
+ }
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ // TRD : the insert threads
+ (tpts+loop)->lasos = &lasos;
+ (tpts+loop)->element_array = element_array + number_elements_per_thread*loop;
+ (tpts+loop)->error_flag = LOWERED;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, new_ordered_with_cursor_insert_thread, &tpts[loop] );
+
+ // TRD : the cursor threads
+ (tpts+loop+number_logical_processors)->lasos = &lasos;
+ (tpts+loop+number_logical_processors)->element_array = NULL;
+ (tpts+loop+number_logical_processors)->error_flag = LOWERED;
+ libtest_threadset_add_thread( &ts, &pts[loop+number_logical_processors], lp, new_ordered_with_cursor_cursor_thread, &tpts[loop+number_logical_processors] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ /* TRD : validate the resultant list
+
+ the cursor threads were checking for orderedness
+ if that failed, they raise their error_flag
+ so validate the list, then check error_flags
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_total;
+
+ lfds710_list_aso_query( &lasos, LFDS710_LIST_ASO_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ for( loop = number_logical_processors ; loop < number_logical_processors * 2 ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ORDER;
+
+ lfds710_list_aso_cleanup( &lasos, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+static int new_ordered_with_cursor_compare_function( void const *value_new, void const *value_in_list )
+{
+ int
+ cr = 0;
+
+ struct test_element
+ *e1,
+ *e2;
+
+ // TRD : value_new can be any value in its range
+ // TRD : value_in_list can be any value in its range
+
+ e1 = (struct test_element *) value_new;
+ e2 = (struct test_element *) value_in_list;
+
+ if( e1->element_number < e2->element_number )
+ cr = -1;
+
+ if( e1->element_number > e2->element_number )
+ cr = 1;
+
+ return cr;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_ordered_with_cursor_insert_thread( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_LIST_ASO_SET_KEY_IN_ELEMENT( (tpts->element_array+loop)->lasoe, tpts->element_array+loop );
+ LFDS710_LIST_ASO_SET_VALUE_IN_ELEMENT( (tpts->element_array+loop)->lasoe, tpts->element_array+loop );
+ lfds710_list_aso_insert( tpts->lasos, &(tpts->element_array+loop)->lasoe, NULL );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_ordered_with_cursor_cursor_thread( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ prev_element_number;
+
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct lfds710_list_aso_element
+ *lasoe;
+
+ struct test_element
+ *element;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ prev_element_number = 0;
+
+ lasoe = LFDS710_LIST_ASO_GET_START( *tpts->lasos );
+
+ // TRD : we may get start before any element has been added to the list
+ if( lasoe == NULL )
+ continue;
+
+ element = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ if( element->element_number < prev_element_number )
+ tpts->error_flag = RAISED;
+
+ prev_element_number = element->element_number;
+
+ lasoe = LFDS710_LIST_ASO_GET_NEXT( *lasoe );
+
+ while( lasoe != NULL )
+ {
+ element = LFDS710_LIST_ASO_GET_VALUE_FROM_ELEMENT( *lasoe );
+
+ if( element->element_number <= prev_element_number )
+ tpts->error_flag = RAISED;
+
+ prev_element_number = element->element_number;
+
+ lasoe = LFDS710_LIST_ASO_GET_NEXT( *lasoe );
+ }
+
+ if( time_loop++ == REDUCED_TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings C4100 (unreferenced formal parameter) and C4127 (conditional expression is constant)
+
+void libtest_tests_list_asu_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_list_asu_element
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_asu_element,next) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_asu_element,value) % LFDS710_PAL_ALIGN_SINGLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+
+
+ // TRD : struct lfds710_list_asu_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_asu_state,dummy_element) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_asu_state,end) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_list_asu_state,start) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_list_asu_element
+ lasue;
+
+ lfds710_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds710_list_asu_state
+ *lasus;
+
+ struct test_element
+ *element_array;
+
+ struct lfds710_list_asu_element
+ *first_element;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_after_thread( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_list_asu_new_after( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL,
+ first_element;
+
+ struct lfds710_list_asu_state
+ lasus;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : run one thread per logical processor
+ run for 250k elements
+ we put a single first element into the list and
+ each thread loops, calling lfds710_list_asu_insert_at_position( LFDS710_LIST_ASU_POSITION_AFTER ),
+ inserting after the single first element
+ each data element contains thread_number and element_number
+ verification should show element_number decreasing on a per thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &lasus, NULL );
+
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( first_element, NULL );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( first_element, NULL );
+ lfds710_list_asu_insert_at_position( &lasus, &first_element, NULL, LFDS710_LIST_ASU_POSITION_START );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ {
+ (element_array+(loop*number_elements_per_thread)+subloop)->thread_number = loop;
+ (element_array+(loop*number_elements_per_thread)+subloop)->element_number = subloop;
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->lasus = &lasus;
+ (tpts+loop)->element_array = element_array + (loop*number_elements_per_thread);
+ (tpts+loop)->first_element = &first_element;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, new_after_thread, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ /* TRD : validate the resultant list
+ iterate over each element
+ we expect to find element numbers decrease on a per thread basis
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = (number_elements_per_thread * number_logical_processors) + 1;
+
+ lfds710_list_asu_query( &lasus, LFDS710_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements_per_thread - 1;
+
+ /* TRD : we have a leading element, after which all inserts occurred
+ we need to get past that element for validation
+ this is why we're not using lfds710_list_asu_get_start_and_then_next()
+ */
+
+ lasue = LFDS710_LIST_ASU_GET_START( lasus );
+
+ lasue = LFDS710_LIST_ASU_GET_NEXT( *lasue );
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lasue != NULL )
+ {
+ element = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( element->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( element->element_number < per_thread_counters[element->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element->element_number > per_thread_counters[element->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( element->element_number == per_thread_counters[element->thread_number] )
+ per_thread_counters[element->thread_number]--;
+
+ lasue = LFDS710_LIST_ASU_GET_NEXT( *lasue );
+ }
+
+ lfds710_list_asu_cleanup( &lasus, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_after_thread( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( (tpts->element_array+loop)->lasue, tpts->element_array+loop );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( (tpts->element_array+loop)->lasue, tpts->element_array+loop );
+ lfds710_list_asu_insert_at_position( tpts->lasus, &(tpts->element_array+loop)->lasue, tpts->first_element, LFDS710_LIST_ASU_POSITION_AFTER );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_list_asu_element
+ lasue;
+
+ lfds710_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds710_list_asu_state
+ *lasus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_end_thread( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_list_asu_new_end( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_list_asu_state
+ lasus;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : run one thread per logical processor
+ run for 250k elements
+ each thread loops, calling lfds710_list_asu_insert_at_position( LFDS710_LIST_ASU_POSITION_END )
+ data elements contain a thread_number and an element_number
+ verification should show element_number increasing on a per thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &lasus, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ {
+ (element_array+(loop*number_elements_per_thread)+subloop)->thread_number = loop;
+ (element_array+(loop*number_elements_per_thread)+subloop)->element_number = subloop;
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->lasus = &lasus;
+ (tpts+loop)->element_array = element_array + (loop*number_elements_per_thread);
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, new_end_thread, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ /* TRD : validate the resultant list
+ iterate over each element
+ we expect to find element numbers increment on a per thread basis
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors;
+
+ lfds710_list_asu_query( &lasus, LFDS710_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ lasue = NULL;
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, lasue) )
+ {
+ element = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( element->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( element->element_number > per_thread_counters[element->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element->element_number < per_thread_counters[element->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( element->element_number == per_thread_counters[element->thread_number] )
+ per_thread_counters[element->thread_number]++;
+ }
+
+ lfds710_list_asu_cleanup( &lasus, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_end_thread( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( (tpts->element_array+loop)->lasue, tpts->element_array+loop );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( (tpts->element_array+loop)->lasue, tpts->element_array+loop );
+ lfds710_list_asu_insert_at_position( tpts->lasus, &(tpts->element_array+loop)->lasue, NULL, LFDS710_LIST_ASU_POSITION_END );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_list_asu_element
+ lasue;
+
+ lfds710_pal_uint_t
+ element_number,
+ thread_number;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+
+ struct lfds710_list_asu_state
+ *lasus;
+
+ struct test_element
+ *element_array;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_start_thread( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_list_asu_new_start( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_list_asu_state
+ lasus;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct test_element
+ *element_array,
+ *element;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : run one thread per logical processor
+ run for 250k elements
+ each thread loops, calling lfds710_list_asu_insert_at_position( LFDS710_LIST_ASU_POSITION_START )
+ data elements contain a thread_number and an element_number
+ verification should show element_number decreasing on a per thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ element_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &lasus, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ {
+ (element_array+(loop*number_elements_per_thread)+subloop)->thread_number = loop;
+ (element_array+(loop*number_elements_per_thread)+subloop)->element_number = subloop;
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->lasus = &lasus;
+ (tpts+loop)->element_array = element_array + (loop*number_elements_per_thread);
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, new_start_thread, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ /* TRD : validate the resultant list
+ iterate over each element
+ we expect to find element numbers decrease on a per thread basis
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors;
+
+ lfds710_list_asu_query( &lasus, LFDS710_LIST_ASU_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements_per_thread - 1;
+
+ lasue = NULL;
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(lasus, lasue) )
+ {
+ element = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ if( element->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( element->element_number < per_thread_counters[element->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( element->element_number > per_thread_counters[element->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( element->element_number == per_thread_counters[element->thread_number] )
+ per_thread_counters[element->thread_number]--;
+ }
+
+ lfds710_list_asu_cleanup( &lasus, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION new_start_thread( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_LIST_ASU_SET_KEY_IN_ELEMENT( (tpts->element_array+loop)->lasue, tpts->element_array+loop );
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( (tpts->element_array+loop)->lasue, tpts->element_array+loop );
+ lfds710_list_asu_insert_at_position( tpts->lasus, &(tpts->element_array+loop)->lasue, NULL, LFDS710_LIST_ASU_POSITION_START );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_add( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_atomic_add( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_pal_atomic_add( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ number_logical_processors,
+ atomic_shared_counter,
+ shared_counter;
+
+ lfds710_pal_uint_t
+ loop = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : here we test LFDS710_PAL_ATOMIC_ADD
+
+ first, we run one thread per CPU where each thread increments
+ a shared counter 10,000,000 times - however, this first test
+ does NOT use atomic add; it uses "++"
+
+ second, we repeat the exercise, but this time using
+ LFDS710_PAL_ATOMIC_ADD()
+
+ if the final value in the first test is less than (10,000,000 * number_logical_processors)
+ then the system is sensitive to non-atomic adds; this means if
+ our atomic version of the test passes, we can have some degree of confidence
+ that it works
+
+ if the final value in the first test is in fact correct, then we can't know
+ that our atomic version has changed anything
+
+ and of course if the final value in the atomic test is wrong, we know things
+ are broken
+ */
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ shared_counter = 0;
+ atomic_shared_counter = 0;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ // TRD : non-atomic
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_add, (void *) &shared_counter );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+ lfds710_misc_force_store();
+ libtest_threadset_run( &ts );
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : atomic
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_atomic_add, (void *) &atomic_shared_counter );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+ lfds710_misc_force_store();
+ libtest_threadset_run( &ts );
+ libtest_threadset_cleanup( &ts );
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : results
+
+ on a single core, "++" and atomic add should be equal
+
+ if we find our non-atomic test passes, then we can't really say anything
+ about whether or not the atomic test is really working
+ */
+
+ if( number_logical_processors == 1 )
+ {
+ if( shared_counter == (10000000 * number_logical_processors) and atomic_shared_counter == (10000000 * number_logical_processors) )
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ if( shared_counter != (10000000 * number_logical_processors) or atomic_shared_counter != (10000000 * number_logical_processors) )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED;
+ }
+
+ if( number_logical_processors >= 2 )
+ {
+ if( shared_counter < (10000000 * number_logical_processors) and atomic_shared_counter == (10000000 * number_logical_processors) )
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ if( shared_counter == (10000000 * number_logical_processors) and atomic_shared_counter == (10000000 * number_logical_processors) )
+ *dvs = LFDS710_MISC_VALIDITY_INDETERMINATE_NONATOMIC_PASSED;
+
+ if( atomic_shared_counter < (10000000 * number_logical_processors) )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_add( void *libtest_threadset_per_thread_state )
+{
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ lfds710_pal_uint_t volatile
+ *shared_counter;
+
+ lfds710_pal_uint_t volatile
+ count = 0;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ shared_counter = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
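+ // TRD : plain "++" is a non-atomic read-modify-write, so with multiple cores some increments can be lost - which is exactly what this half of the test is looking for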
+ while( count++ < 10000000 )
+ (*shared_counter)++;
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_atomic_add( void *libtest_threadset_per_thread_state )
+{
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ lfds710_pal_uint_t volatile
+ result,
+ *shared_counter;
+
+ lfds710_pal_uint_t volatile
+ count = 0;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ shared_counter = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
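+ // TRD : LFDS710_PAL_ATOMIC_ADD performs the increment as a single atomic read-modify-write; the add result is placed in result, which this test does not need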
+ while( count++ < 10000000 )
+ {
+ LFDS710_PAL_ATOMIC_ADD( shared_counter, 1, result, lfds710_pal_uint_t );
+ (void) result;
+ }
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct libtest_tests_pal_atomic_cas_state
+{
+ lfds710_pal_uint_t
+ local_counter;
+
+ lfds710_pal_uint_t volatile
+ *shared_counter;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_cas( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_pal_atomic_cas( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ shared_counter;
+
+ lfds710_pal_uint_t
+ local_total = 0;
+
+ lfds710_pal_uint_t
+ loop = 0,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct libtest_tests_pal_atomic_cas_state
+ *atcs;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : here we test pal_cas
+
+ we run one thread per CPU
+ we use pal_cas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ each thread loops ten million times
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ atcs = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_tests_pal_atomic_cas_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ shared_counter = 0;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (atcs+loop)->shared_counter = &shared_counter;
+ (atcs+loop)->local_counter = 0;
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_cas, atcs+loop );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ local_total += (atcs+loop)->local_counter;
+
+ if( local_total == shared_counter )
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ if( local_total != shared_counter )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_cas( void *libtest_threadset_per_thread_state )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ loop = 0;
+
+ lfds710_pal_uint_t LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_SINGLE_POINTER)
+ exchange,
+ compare;
+
+ struct libtest_tests_pal_atomic_cas_state
+ *atcs;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ atcs = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( loop++ < 10000000 )
+ {
+ compare = *atcs->shared_counter;
+
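+ // TRD : the usual CAS retry loop - on failure, compare is expected to hold the value actually observed, so we recompute exchange from it and try again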
+ do
+ {
+ exchange = compare + 1;
+ LFDS710_PAL_ATOMIC_CAS( atcs->shared_counter, &compare, exchange, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ }
+ while( result == 0 );
+
+ atcs->local_counter++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct libtest_tests_pal_atomic_dwcas_state
+{
+ lfds710_pal_uint_t
+ local_counter;
+
+ lfds710_pal_uint_t volatile
+ (*shared_counter)[2];
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_dwcas( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_pal_atomic_dwcas( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ local_total = 0,
+ loop,
+ number_logical_processors;
+
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ shared_counter[2] = { 0, 0 };
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct libtest_tests_pal_atomic_dwcas_state
+ *atds;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : here we test pal_dwcas
+
+ we run one thread per CPU
+ we use pal_dwcas() to increment a shared counter
+ every time a thread successfully increments the counter,
+ it increments a thread local counter
+ each thread loops ten million times
+ after the threads finish, we total the local counters
+ they should equal the shared counter
+ */
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ atds = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_tests_pal_atomic_dwcas_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (atds+loop)->shared_counter = &shared_counter;
+ (atds+loop)->local_counter = 0;
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_dwcas, atds+loop );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : results
+ LFDS710_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ local_total += (atds+loop)->local_counter;
+
+ if( local_total == shared_counter[0] )
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ if( local_total != shared_counter[0] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( disable : 4702 )
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_dwcas( void *libtest_threadset_per_thread_state )
+{
+ char unsigned
+ result;
+
+ lfds710_pal_uint_t
+ loop = 0;
+
+ lfds710_pal_uint_t LFDS710_PAL_ALIGN(LFDS710_PAL_ALIGN_DOUBLE_POINTER)
+ exchange[2],
+ compare[2];
+
+ struct libtest_tests_pal_atomic_dwcas_state
+ *atds;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ atds = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( loop++ < 10000000 )
+ {
+ compare[0] = (*atds->shared_counter)[0];
+ compare[1] = (*atds->shared_counter)[1];
+
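+ // TRD : only the first word is incremented; the second word is carried along unchanged, and the DWCAS succeeds only if both words still match their compare values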
+ do
+ {
+ exchange[0] = compare[0] + 1;
+ exchange[1] = compare[1];
+ LFDS710_PAL_ATOMIC_DWCAS( (*atds->shared_counter), compare, exchange, LFDS710_MISC_CAS_STRENGTH_WEAK, result );
+ }
+ while( result == 0 );
+
+ atds->local_counter++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ counter,
+ *counter_array,
+ number_elements_per_thread,
+ number_logical_processors;
+
+ lfds710_pal_uint_t volatile
+ *shared_exchange;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_exchange( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_atomic_exchange( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_pal_atomic_exchange( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum flag
+ atomic_exchange_success_flag = RAISED,
+ exchange_success_flag = RAISED;
+
+ lfds710_pal_uint_t
+ loop,
+ *merged_counter_arrays,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ subloop;
+
+ lfds710_pal_uint_t volatile LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ exchange = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : here we test pal_atomic_exchange
+
+ we have one thread per logical core
+ there is one variable which every thread will exchange to/from
+ we know the number of logical cores
+ the threads have a counter each, which begins with their logical core number plus one
+ (plus one because the exchange counter begins with 0 already in place)
+ (e.g. thread 0 begins with its counter at 1, thread 1 begins with its counter at 2, etc)
+
+ there is an array of counters per thread, as large as the available memory allows, each counter set to 0
+
+ when running, each thread increments its counter by the number of threads
+ the threads busy loop, exchanging
+ every time a thread pulls a number off the central, shared exchange variable,
+ it increments the counter for that variable in its thread-local counter array
+
+ (we're not using a global array, because we'd have to be atomic in our increments,
+ which is a slow-down we don't want)
+
+ at the end, we merge all the counter arrays and if the frequency for a counter is a value
+ other than 1, the exchange was not atomic
+
+ we perform the test twice, once with pal_atomic_exchange, once with a non-atomic exchange
+
+ we expect the atomic to pass and the non-atomic to fail
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ merged_counter_arrays = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(lfds710_pal_uint_t), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ /* TRD : one array per thread, one array for merging
+ +1 as we need store for a merged counter array
+ */
+
+ number_elements_per_thread = number_elements / (number_logical_processors+1);
+
+ // TRD : non-atomic
+
+ for( loop = 0 ; loop < number_elements_per_thread ; loop++ )
+ *(merged_counter_arrays+loop) = 0;
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->counter = loop + 1;
+ // TRD : +1 on loop to move past merged_counter_arrays
+ (tpts+loop)->counter_array = merged_counter_arrays + ((loop+1)*number_elements_per_thread);
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ *((tpts+loop)->counter_array+subloop) = 0;
+ (tpts+loop)->number_logical_processors = number_logical_processors;
+ (tpts+loop)->shared_exchange = &exchange;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_exchange, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_elements_per_thread ; loop++ )
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *(merged_counter_arrays+loop) += *( (tpts+subloop)->counter_array+loop );
+
+ /* TRD : the worker threads exit when their per-thread counter exceeds number_elements_per_thread
+ as such the final number_logical_processors numbers are not read
+ we could change the threads to exit when the number they read exceeds number_elements_per_thread
+ but then we'd need an if() in their work-loop,
+ and we want to go as fast as possible
+ */
+
+ for( loop = 0 ; loop < number_elements_per_thread - number_logical_processors ; loop++ )
+ if( *(merged_counter_arrays+loop) != 1 )
+ exchange_success_flag = LOWERED;
+
+ // TRD : now for atomic exchange - we need to re-init the data structures
+
+ for( loop = 0 ; loop < number_elements_per_thread ; loop++ )
+ *(merged_counter_arrays+loop) = 0;
+
+ libtest_threadset_init( &ts, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ for( subloop = 0 ; subloop < number_elements_per_thread ; subloop++ )
+ *((tpts+loop)->counter_array+subloop) = 0;
+
+ loop = 0;
+ lasue = NULL;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_atomic_exchange, &tpts[loop] );
+ loop++;
+ }
+
+ exchange = 0;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_elements_per_thread ; loop++ )
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *(merged_counter_arrays+loop) += *( (tpts+subloop)->counter_array+loop );
+
+ for( loop = 0 ; loop < number_elements_per_thread - number_logical_processors ; loop++ )
+ if( *(merged_counter_arrays+loop) != 1 )
+ atomic_exchange_success_flag = LOWERED;
+
+ /* TRD : results
+
+ on a single core, atomic and non-atomic exchange should both work
+
+ if we find our non-atomic test passes, then we can't really say anything
+ about whether or not the atomic test is really working
+ */
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ if( number_logical_processors == 1 )
+ {
+ if( exchange_success_flag == RAISED and atomic_exchange_success_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ if( exchange_success_flag != RAISED or atomic_exchange_success_flag != RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED;
+ }
+
+ if( number_logical_processors >= 2 )
+ {
+ if( atomic_exchange_success_flag == RAISED and exchange_success_flag == LOWERED )
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ if( atomic_exchange_success_flag == RAISED and exchange_success_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INDETERMINATE_NONATOMIC_PASSED;
+
+ if( atomic_exchange_success_flag == LOWERED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ATOMIC_FAILED;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_exchange( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ local_counter,
+ exchange;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ local_counter = tpts->counter;
+
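+ // TRD : deliberately non-atomic - the read of *shared_exchange and the write of local_counter are separate operations, so with multiple cores values can be lost, which the test is designed to detect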
+ while( local_counter < tpts->number_elements_per_thread )
+ {
+ exchange = *tpts->shared_exchange;
+ *tpts->shared_exchange = local_counter;
+
+ ( *(tpts->counter_array + exchange) )++;
+
+ local_counter += tpts->number_logical_processors;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_atomic_exchange( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ local_counter,
+ exchange;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ local_counter = tpts->counter;
+
+ while( local_counter < tpts->number_elements_per_thread )
+ {
+ exchange = local_counter;
+
+ LFDS710_PAL_ATOMIC_EXCHANGE( tpts->shared_exchange, exchange, lfds710_pal_uint_t );
+
+ // TRD : increment the original value in shared_exchange, which exchange has now been set to
+ ( *(tpts->counter_array + exchange) )++;
+
+ local_counter += (lfds710_pal_uint_t) tpts->number_logical_processors;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings for unreferenced formal parameters and constant conditional expressions
+
+void libtest_tests_prng_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_prng_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_prng_state,entropy) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_prng_state
+ *ps;
+
+ lfds710_pal_uint_t
+ read_index,
+ *output_array;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_generate( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_prng_generate( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum flag
+ duplicate_flag = LOWERED,
+ finished_flag = LOWERED;
+
+ lfds710_pal_uint_t
+ *output_arrays,
+ index = 0,
+ loop = 0,
+ mean = 0,
+ *merged_output_arrays,
+ merged_write_index = 0,
+ number_logical_processors,
+ smallest_prng_value,
+ ten_percent,
+ thread_to_bump = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct lfds710_prng_state
+ ps;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : here we test the atomic PRNG
+ we create an array, an output buffer, of 128 elements per thread
+ we have a single, global PRNG
+ we start all the threads and let them run for test duration seconds
+ (to ensure they are all running together)
+ each thread loops, writing new numbers to its output array
+ obviously in test duration seconds it will write many more than 128 elements -
+ it just loops over the output array
+
+ then when we're done we merge sort the output arrays (no qsort, not using standard library)
+ the number of duplicates should be 0
+ and the mean should be close to half of LFDS710_PRNG_MAX (only the mean is checked below; a standard deviation check is still to be added)
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ output_arrays = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * 128 * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ merged_output_arrays = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * 128 * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds710_prng_init_valid_on_current_logical_core( &ps, LFDS710_PRNG_SEED );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->ps = &ps;
+ (tpts+loop)->output_array = output_arrays + (loop * 128);
+ (tpts+loop)->read_index = 0; // TRD : convenient to alloc here, as we need one per thread, used in validation
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_generate, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ // TRD : merge sort the counter arrays into merged_output_array
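+ //       (a k-way merge; each pass selects the smallest unread value across the per-thread output arrays)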
+ while( finished_flag == LOWERED )
+ {
+ smallest_prng_value = LFDS710_PRNG_MAX;
+ finished_flag = RAISED;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( tpts[loop].read_index < 128 and tpts[loop].output_array[ tpts[loop].read_index ] < smallest_prng_value )
+ {
+ smallest_prng_value = tpts[loop].output_array[ tpts[loop].read_index ];
+ thread_to_bump = loop;
+ finished_flag = LOWERED;
+ }
+
+ // TRD : only write when an element was actually found, otherwise the final pass would write one element past the end of merged_output_arrays
+ if( finished_flag == LOWERED )
+ {
+ tpts[thread_to_bump].read_index++;
+ merged_output_arrays[ merged_write_index++ ] = smallest_prng_value;
+ }
+ }
+
+ // TRD : now check for duplicates
+ while( duplicate_flag == LOWERED and index < (128 * number_logical_processors) - 2 )
+ {
+ if( merged_output_arrays[index] == merged_output_arrays[index+1] )
+ duplicate_flag = RAISED;
+ index++;
+ }
+
+ if( duplicate_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : now for standard deviation (integer math only is allowed, and we can't sum the outputs because we'll overflow)
+ for( loop = 0 ; loop < 128 * number_logical_processors ; loop++ )
+ mean += merged_output_arrays[loop] / (128*number_logical_processors);
+
+ /* TRD : the mean of an unsigned 64 bit is 9223372036854775808
+ the mean of an unsigned 32 bit is 2147483648
+ there are 128 random numbers per thread
+ the more numbers there are, the more closely we should approach the expected mean
+ it'd take me a while - if I could - to work out the expected deviation for a given number of numbers
+ empirically, a single logical core (128 numbers) shouldn't be more than 10% off
+ */
+
+ ten_percent = LFDS710_PRNG_MAX / 10;
+
+ if( mean < (LFDS710_PRNG_MAX / 2) - ten_percent or mean > (LFDS710_PRNG_MAX / 2) + ten_percent )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : should add a standard deviation check here
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_generate( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ index = 0,
+ time_loop = 0;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ LFDS710_PRNG_GENERATE( *tpts->ps, tpts->output_array[index] );
+
+ // TRD : 128 element array, so masking on 128-1 makes us loop, much faster than modulus
+ index = ( (index+1) & 0x7F );
+
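+ // TRD : time() is comparatively expensive, so the wall-clock check is only made once every TIME_LOOP_COUNT iterations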
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings for unreferenced formal parameters and constant conditional expressions
+
+void libtest_tests_queue_bmm_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_queue_bmm_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_bmm_state,read_index) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_bmm_state,write_index) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_tests_queue_bmm_count( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ expected_count = 0,
+ key,
+ loop,
+ number_elements,
+ subloop,
+ value;
+
+ struct lfds710_queue_bmm_element
+ qbmme_array[8];
+
+ struct lfds710_queue_bmm_state
+ qbmms;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : make a queue with 8 elements
+          loop a number of times; each pass enqueues two elements (enqueue can fail once the queue is full),
+          dequeues one element, and then checks the queue's reported count against a locally maintained expected count
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, qbmme_array, 8, NULL );
+
+ // TRD : loop a few times
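+ //       each pass enqueues up to two elements and dequeues one, so the queue gradually fills and then hovers around full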
+ for( loop = 0 ; loop < 64 ; loop++ )
+ {
+ // TRD : enqueue two (but the queue may be full, so we might only queue one)
+ for( subloop = 0 ; subloop < 2 ; subloop++ )
+ {
+ rv = lfds710_queue_bmm_enqueue( &qbmms, NULL, NULL );
+
+ if( rv == 1 and expected_count == 8 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( rv == 1 and expected_count < 8 )
+ expected_count++;
+ }
+
+ // TRD : dequeue one
+ rv = lfds710_queue_bmm_dequeue( &qbmms, (void **) &key, (void **) &value );
+
+ if( rv == 1 )
+ expected_count--;
+
+ if( rv == 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_queue_bmm_query( &qbmms, LFDS710_QUEUE_BMM_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void *) &number_elements );
+
+ if( number_elements != expected_count )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ lfds710_queue_bmm_cleanup( &qbmms, NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ struct lfds710_queue_bmm_state
+ *qbmms;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_bmm_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ counter = 0,
+ loop = 0,
+ power_of_two_number_elements = 1,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_bmm_element
+ *qbmme_array;
+
+ struct lfds710_queue_bmm_state
+ qbmms;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create an empty queue, with the largest possible number of elements
+ do a single-threaded (in the prep function) full enqueue
+ with the value being an incrementing counter
+ then run one thread per CPU
+ where each thread busy-works, dequeuing, and checks the dequeued value is greater than the previously dequeued value
+ run until the queue is empty
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qbmme_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct lfds710_queue_bmm_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ // TRD : need to only use a power of 2 number of elements
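+ //       the loop below rounds number_elements down to the largest power of two which fits in the allocated array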
+ number_elements >>= 1;
+
+ while( number_elements != 0 )
+ {
+ number_elements >>= 1;
+ power_of_two_number_elements <<= 1;
+ }
+
+ lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, qbmme_array, power_of_two_number_elements, NULL );
+
+ // TRD : fill the queue
+ while( lfds710_queue_bmm_enqueue(&qbmms, NULL, (void *) (counter++)) );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qbmms = &qbmms;
+ (tpts+loop)->error_flag = LOWERED;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_simple_dequeuer, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : we just check the per-thread error flags and validate the queue
+ most of the checking happened in the threads
+ */
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ORDER;
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ {
+ vi.min_elements = vi.max_elements = 0;
+ lfds710_queue_bmm_query( &qbmms, LFDS710_QUEUE_BMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+ }
+
+ lfds710_queue_bmm_cleanup( &qbmms, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ counter = 0,
+ key,
+ value;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( lfds710_queue_bmm_dequeue(tpts->qbmms, (void *) &key, (void *) &value) )
+ {
+ if( value < counter )
+ tpts->error_flag = RAISED;
+
+ if( value > counter )
+ counter = value;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ thread_number;
+
+ struct lfds710_queue_bmm_state
+ *qbmms;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_enqueuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_bmm_enqueuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ counter_number,
+ loop = 0,
+ *per_thread_counters,
+ power_of_two_number_elements = 1,
+ number_elements,
+ number_logical_processors,
+ thread_number;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_bmm_element
+ *qbmme_array;
+
+ struct lfds710_queue_bmm_state
+ qbmms;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create an empty queue, with the largest possible number of elements
+ then run one thread per CPU
+ where each thread busy-works, enqueuing key/values, where the key is the thread ID and the value is an incrementing per-thread counter
+ run until the queue is full
+
+ when we're done, we check that all the elements are present
+ and increment on a per-thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qbmme_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct lfds710_queue_bmm_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ // TRD : need to only use a power of 2 number of elements
+ number_elements >>= 1;
+
+ while( number_elements != 0 )
+ {
+ number_elements >>= 1;
+ power_of_two_number_elements <<= 1;
+ }
+
+ lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, qbmme_array, power_of_two_number_elements, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qbmms = &qbmms;
+ (tpts+loop)->thread_number = loop;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_simple_enqueuer, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : first, validate the queue
+ then dequeue
+ we expect to find element numbers increment on a per thread basis
+ */
+
+ vi.min_elements = vi.max_elements = power_of_two_number_elements;
+
+ lfds710_queue_bmm_query( &qbmms, LFDS710_QUEUE_BMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_queue_bmm_dequeue(&qbmms, (void **) &thread_number, (void **) &counter_number) )
+ {
+ if( thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( counter_number > per_thread_counters[thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( counter_number < per_thread_counters[thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( counter_number == per_thread_counters[thread_number] )
+ per_thread_counters[thread_number]++;
+ }
+
+ lfds710_queue_bmm_cleanup( &qbmms, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_enqueuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ counter = 0;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( lfds710_queue_bmm_enqueue(tpts->qbmms, (void *) (tpts->thread_number), (void *) (counter++)) );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ lfds710_pal_uint_t
+ counter,
+ number_logical_processors,
+ *per_thread_counters,
+ thread_number;
+
+ struct lfds710_queue_bmm_state
+ *qbmms;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_bmm_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters,
+ power_of_two_number_elements = 1,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_bmm_element
+ *qbmme_array;
+
+ struct lfds710_queue_bmm_state
+ qbmms;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create a queue with one element per thread
+ each thread constantly dequeues and enqueues from that one queue
+ where when enqueuing sets in the element
+ its thread number and counter
+ and when dequeuing, checks the thread number and counter
+ against previously seen counter for that thread
+ where it should always see a higher number
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ qbmme_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct lfds710_queue_bmm_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ // TRD : need to only use a power of 2 number of elements
+ number_elements >>= 1;
+
+ while( number_elements != 0 )
+ {
+ number_elements >>= 1;
+ power_of_two_number_elements <<= 1;
+ }
+
+ /* TRD : to make the test more demanding, use a small number of elements
+ we really want one element per core
+ but with the power-of-2 requirement, we can't have that,
+ so we settle for the largest power of two not exceeding the number of logical cores
+ */
+
+ while( power_of_two_number_elements > number_logical_processors )
+ power_of_two_number_elements >>= 1;
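+ // NOTE : e.g. with 6 logical cores this leaves a 4 element queue; with 8 cores, an 8 element queue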
+
+ lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, qbmme_array, power_of_two_number_elements, NULL );
+
+ // TRD : we assume the test will iterate at least once (or we'll have a false negative)
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ lfds710_queue_bmm_enqueue( &qbmms, (void *) loop, (void *) 0 );
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qbmms = &qbmms;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->counter = 0;
+ (tpts+loop)->error_flag = LOWERED;
+ (tpts+loop)->per_thread_counters = per_thread_counters + loop * number_logical_processors;
+ (tpts+loop)->number_logical_processors = number_logical_processors;
+
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *((tpts+loop)->per_thread_counters+subloop) = 0;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_enqueuer_and_dequeuer, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = power_of_two_number_elements;
+
+ lfds710_queue_bmm_query( &qbmms, LFDS710_QUEUE_BMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_queue_bmm_cleanup( &qbmms, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ counter,
+ thread_number,
+ time_loop = 0;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ /* TRD : this is a soft queue, so dequeue/enqueue operations although always occurring
+ may not be visible by the time the enqueue/dequeue function returns
+ i.e. all threads may have dequeued, and then enqueued, but not seen each other's enqueues yet
+ so the queue looks empty
+ */
+
+ while( 0 == lfds710_queue_bmm_dequeue(tpts->qbmms, (void *) &thread_number, (void *) &counter) );
+
+ if( thread_number >= tpts->number_logical_processors )
+ tpts->error_flag = RAISED;
+ else
+ {
+ if( counter < tpts->per_thread_counters[thread_number] )
+ tpts->error_flag = RAISED;
+
+ if( counter >= tpts->per_thread_counters[thread_number] )
+ tpts->per_thread_counters[thread_number] = counter+1;
+ }
+
+ thread_number = tpts->thread_number;
+ counter = ++tpts->counter;
+
+ // TRD : the enqueue can only succeed once a dequeue of the *very next element* in the queue has become visible (i.e. our own earlier dequeue may not be the right dequeue)
+ while( 0 == lfds710_queue_bmm_enqueue(tpts->qbmms, (void *) thread_number, (void *) counter) );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ thread_number,
+ total_dequeues,
+ total_enqueues;
+
+ struct lfds710_queue_bmm_state
+ *qbmms;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_bmm_rapid_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ counter,
+ loop,
+ number_logical_processors,
+ *per_thread_counters,
+ thread_number;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_bmm_element
+ *qbmme_array;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct lfds710_queue_bmm_state
+ qbmms;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single queue with 8,192 elements
+ we don't want too many elements, so we ensure plenty of element re-use
+ each thread simply loops dequeuing and enqueuing
+ where the user data indicates thread number and an incrementing counter
+ verification is that the counter increments on a per-thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ // TRD : must be a power of 2
+ qbmme_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct lfds710_queue_bmm_element) * 8192, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds710_queue_bmm_init_valid_on_current_logical_core( &qbmms, qbmme_array, 8192, NULL );
+
+ // TRD : we assume the test will iterate at least once (or we'll have a false negative)
+ for( loop = 0 ; loop < 8192 ; loop++ )
+ lfds710_queue_bmm_enqueue( &qbmms, (void *) 0, (void *) 0 );
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qbmms = &qbmms;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->total_dequeues = 0;
+ (tpts+loop)->total_enqueues = 0;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_rapid_enqueuer_and_dequeuer, &tpts[loop] );
+ loop++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = 8192;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ vi.min_elements -= tpts[loop].total_dequeues;
+ vi.min_elements += tpts[loop].total_enqueues;
+ }
+
+ vi.max_elements = vi.min_elements;
+
+ lfds710_queue_bmm_query( &qbmms, LFDS710_QUEUE_BMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ // TRD : now check results
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_queue_bmm_dequeue(&qbmms, (void **) &thread_number, (void **) &counter) )
+ {
+ if( thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( per_thread_counters[thread_number] == 0 )
+ per_thread_counters[thread_number] = counter;
+
+ if( counter > per_thread_counters[thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( counter < per_thread_counters[thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( counter == per_thread_counters[thread_number] )
+ per_thread_counters[thread_number]++;
+ }
+
+ lfds710_queue_bmm_cleanup( &qbmms, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ key,
+ value,
+ time_loop = 0;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ // TRD : with BMM, both dequeue and enqueue may spuriously fail, so we only increment the counter if the enqueue actually succeeded
+
+ tpts->total_dequeues += lfds710_queue_bmm_dequeue( tpts->qbmms, (void **) &key, (void **) &value );
+
+ // TRD : discard the dequeued content - this is the rapid test
+
+ // TRD : counter (total_enqueues works in the same way, so using that) needs to increment, for validation checks
+ rv = lfds710_queue_bmm_enqueue( tpts->qbmms, (void *) tpts->thread_number, (void *) (tpts->total_enqueues) );
+
+ if( rv == 1 )
+ tpts->total_enqueues++;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_tests_queue_bss_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct lfds710_queue_bss_element
+ element_array[128];
+
+ struct lfds710_queue_bss_state
+ qs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ void
+ *value;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create an empty queue
+ enqueue 127 elements (a 128 element queue holds at most 127)
+ then dequeue the elements, in the same thread - we're API testing
+ it's a single producer, single consumer queue, so we just do this in our current thread
+ since we're enqueuing and dequeuing in the same thread, the elements must come back in enqueue order
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_queue_bss_init_valid_on_current_logical_core( &qs, element_array, 128, NULL );
+
+ for( loop = 0 ; loop < 127 ; loop++ )
+ lfds710_queue_bss_enqueue( &qs, NULL, (void *) loop );
+
+ for( loop = 0 ; loop < 127 ; loop++ )
+ {
+ lfds710_queue_bss_dequeue( &qs, NULL, &value );
+ if( (lfds710_pal_uint_t) value != loop )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ }
+
+ vi.min_elements = vi.max_elements = 0;
+
+ lfds710_queue_bss_query( &qs, LFDS710_QUEUE_BSS_QUERY_VALIDATE, &vi, dvs );
+
+ lfds710_queue_bss_cleanup( &qs, NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_tests_queue_bss_enqueuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ loop;
+
+ struct lfds710_queue_bss_element
+ element_array[128];
+
+ struct lfds710_queue_bss_state
+ qs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create an empty queue
+ enqueue 127 elements (a 128 element queue holds at most 127), checking each enqueue succeeds
+ then check that one further enqueue fails, since the queue is full
+ it's a single producer queue, so we just do this in our current thread
+ it's an API test
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_queue_bss_init_valid_on_current_logical_core( &qs, element_array, 128, NULL );
+
+ for( loop = 0 ; loop < 127 ; loop++ )
+ if( 1 != lfds710_queue_bss_enqueue(&qs, NULL, (void *) loop) )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : at this point enqueuing one more should return 0
+ rv = lfds710_queue_bss_enqueue( &qs, NULL, (void *) loop );
+
+ if( rv != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ vi.min_elements = vi.max_elements = 127;
+
+ lfds710_queue_bss_query( &qs, LFDS710_QUEUE_BSS_QUERY_VALIDATE, &vi, dvs );
+
+ lfds710_queue_bss_cleanup( &qs, NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ struct lfds710_queue_bss_state
+ *qs;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_bss_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue;
+
+ struct lfds710_queue_bss_element
+ element_array[4];
+
+ struct lfds710_queue_bss_state
+ qs;
+
+ struct libtest_logical_processor
+ *lp,
+ *lp_first;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : so, this is the real test
+ problem is, because we use memory barriers only
+ and we only support one producer and one consumer
+ we need to ensure these threads are on different physical cores
+ if they're on the same core, the code would work even without memory barriers
+
+ problem is, in the test application, we only know the *number* of logical cores
+ obtaining topology information adds a great deal of complexity to the test app
+ and makes porting much harder
+
+ so, we know how many logical cores there are; my thought is to partially
+ permute over them - we always run the producer on core 0, but we iterate
+ over the other logical cores, running the test once each time, with the
+ consumer being run on core 0, then core 1, then core 2, etc
+
+ (we run on core 0 for the single-cpu case; it's redundant, since a single
+ logical core running both producer and consumer will work, but otherwise
+ we have to skip the test, which is confusing for the user)
+
+ the test is one thread enqueuing and one thread dequeuing for two seconds
+ */
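+ // NOTE : concretely, the loop below re-initialises the queue and the threadset for each logical core, runs the producer (always on the first core) and the consumer (on the current core) for two seconds, then moves on to the next core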
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ for( loop = 0 ; loop < 2 ; loop++ )
+ {
+ (tpts+loop)->qs = &qs;
+ (tpts+loop)->error_flag = LOWERED;
+ }
+
+ /* TRD : producer always on core 0
+ iterate over the other cores with consumer
+ */
+
+ lasue = LFDS710_LIST_ASU_GET_START( *list_of_logical_processors );
+ lp_first = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ while( lasue != NULL )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ lfds710_queue_bss_init_valid_on_current_logical_core( &qs, element_array, 4, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ LFDS710_MISC_BARRIER_STORE;
+ lfds710_misc_force_store();
+
+ libtest_threadset_add_thread( &ts, &pts[0], lp_first, thread_enqueuer, &tpts[0] );
+ libtest_threadset_add_thread( &ts, &pts[1], lp, thread_dequeuer, &tpts[1] );
+
+ libtest_threadset_run( &ts );
+ libtest_threadset_cleanup( &ts );
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_queue_bss_cleanup( &qs, NULL );
+
+ lasue = LFDS710_LIST_ASU_GET_NEXT( *lasue );
+ }
+
+ if( (tpts+1)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer( void *libtest_threadset_per_thread_state )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ datum = 0,
+ time_loop = 0;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + 2 )
+ {
+ rv = lfds710_queue_bss_enqueue( tpts->qs, NULL, (void *) datum );
+
+ if( rv == 1 )
+ if( ++datum == 4 )
+ datum = 0;
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ datum,
+ expected_datum = 0,
+ time_loop = 0;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + 2 )
+ {
+ rv = lfds710_queue_bss_dequeue( tpts->qs, NULL, (void *) &datum );
+
+ if( rv == 1 )
+ {
+ if( datum != expected_datum )
+ tpts->error_flag = RAISED;
+
+ if( ++expected_datum == 4 )
+ expected_datum = 0;
+ }
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings for unreferenced formal parameters and for constant conditional expressions
+
+void libtest_tests_queue_umm_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_queue_umm_element
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_umm_element,next) % LFDS710_PAL_ALIGN_DOUBLE_POINTER != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
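+ // NOTE : the next field needs double-pointer alignment since (in lfds710) it is the pointer-and-counter pair targeted by the double-width compare-and-swap; the state member checks below use LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, keeping the enqueue and dequeue ends in separate atomic-isolation regions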
+
+
+
+ // TRD : struct lfds710_queue_umm_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_umm_state,enqueue) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_umm_state,dequeue) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_umm_state,aba_counter) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_queue_umm_state,user_state) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ struct lfds710_queue_umm_state
+ *qs;
+};
+
+struct test_element
+{
+ struct lfds710_queue_umm_element
+ qe;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_umm_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ qe_dummy;
+
+ struct lfds710_queue_umm_state
+ qs;
+
+ struct lfds710_misc_validation_info
+ vi = { 0, 0 };
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create a queue, adding as many elements as the test memory allows
+
+ use a single thread to enqueue every element
+ each element's user data is an incrementing counter
+
+ then run one thread per CPU
+ where each busy-works dequeuing
+
+ when an element is dequeued, we check (on a per-thread basis) that the
+ value dequeued is greater than the value previously dequeued
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ lfds710_queue_umm_init_valid_on_current_logical_core( &qs, &qe_dummy, NULL );
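+ // NOTE : the unbounded (umm) queue keeps a permanent dummy element, which is why init is handed qe_dummy here; it must stay allocated until lfds710_queue_umm_cleanup is called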
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, loop );
+ lfds710_queue_umm_enqueue( &qs, &(te_array+loop)->qe );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qs = &qs;
+ (tpts+loop)->error_flag = LOWERED;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_simple_dequeuer, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ // TRD : check queue is empty
+ lfds710_queue_umm_query( &qs, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_queue_umm_cleanup( &qs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ *prev_value,
+ *value;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ lfds710_queue_umm_dequeue( tpts->qs, &qe );
+ prev_value = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qe );
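+ // NOTE : one element is dequeued before the start signal purely to establish a baseline prev_value for the ordering check below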
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( lfds710_queue_umm_dequeue(tpts->qs, &qe) )
+ {
+ value = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( value <= prev_value )
+ tpts->error_flag = RAISED;
+
+ prev_value = value;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread,
+ thread_number;
+
+ struct lfds710_queue_umm_state
+ *qs;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds710_queue_umm_element
+ qe;
+
+ lfds710_pal_uint_t
+ counter,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_enqueuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_umm_enqueuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ *per_thread_counters,
+ loop = 0,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ dummy_qe;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct lfds710_queue_umm_state
+ qs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te,
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create an empty queue
+ then run one thread per CPU
+ where each thread busy-works, enqueuing elements from its own slice of the element array (one slice per thread)
+ until number_elements_per_thread elements are enqueued, per thread
+ each element's void pointer of user data is a struct containing thread number and element number
+ where element_number is a thread-local counter starting at 0
+
+ when we're done, we check that all the elements are present
+ and increment on a per-thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ lfds710_queue_umm_init_valid_on_current_logical_core( &qs, &dummy_qe, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qs = &qs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ (tpts+loop)->te_array = te_array + loop * number_elements_per_thread;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_simple_enqueuer, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ /* TRD : first, validate the queue
+
+ then dequeue
+ we expect to find element numbers increment on a per thread basis
+ */
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors;
+
+ lfds710_queue_umm_query( &qs, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_queue_umm_dequeue(&qs, &qe) )
+ {
+ te = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( te->counter > per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->counter < per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->counter == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]++;
+ }
+
+ lfds710_queue_umm_cleanup( &qs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_enqueuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ (tpts->te_array+loop)->thread_number = tpts->thread_number;
+ (tpts->te_array+loop)->counter = loop;
+ }
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( (tpts->te_array+loop)->qe, tpts->te_array+loop );
+ lfds710_queue_umm_enqueue( tpts->qs, &(tpts->te_array+loop)->qe );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ lfds710_pal_uint_t
+ counter,
+ number_logical_processors,
+ *per_thread_counters,
+ thread_number;
+
+ struct lfds710_queue_umm_state
+ *qs;
+};
+
+struct test_element
+{
+ struct lfds710_queue_umm_element
+ qe,
+ *qe_use;
+
+ lfds710_pal_uint_t
+ counter,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_umm_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_umm_state
+ qs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : create a queue with one element per thread
+ each thread constantly dequeues and enqueues from that one queue
+ where when enqueuing sets in the element
+ its thread number and counter
+ and when dequeuing, checks the thread number and counter
+ against previously seen counter for that thread
+ where it should always see a higher number
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ lfds710_queue_umm_init_valid_on_current_logical_core( &qs, &(te_array+number_logical_processors)->qe, NULL );
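+ // NOTE : element number_logical_processors of te_array is given up to the queue as its dummy element; elements 0 to number_logical_processors-1 are the ones enqueued below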
+
+ // TRD : we assume the test will iterate at least once (or we'll have a false negative)
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->thread_number = loop;
+ (te_array+loop)->counter = 0;
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, te_array+loop );
+ lfds710_queue_umm_enqueue( &qs, &(te_array+loop)->qe );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qs = &qs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->counter = 0;
+ (tpts+loop)->error_flag = LOWERED;
+ (tpts+loop)->per_thread_counters = per_thread_counters + loop * number_logical_processors;
+ (tpts+loop)->number_logical_processors = number_logical_processors;
+
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *((tpts+loop)->per_thread_counters+subloop) = 0;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_enqueuer_and_dequeuer, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_logical_processors;
+
+ lfds710_queue_umm_query( &qs, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_queue_umm_cleanup( &qs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct test_element
+ *te;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds710_queue_umm_dequeue( tpts->qs, &qe );
+ te = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( te->thread_number >= tpts->number_logical_processors )
+ tpts->error_flag = RAISED;
+ else
+ {
+ if( te->counter < tpts->per_thread_counters[te->thread_number] )
+ tpts->error_flag = RAISED;
+
+ if( te->counter >= tpts->per_thread_counters[te->thread_number] )
+ tpts->per_thread_counters[te->thread_number] = te->counter+1;
+ }
+
+ te->thread_number = tpts->thread_number;
+ te->counter = ++tpts->counter;
+
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( *qe, te );
+ lfds710_queue_umm_enqueue( tpts->qs, qe );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_queue_umm_state
+ *qs;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer_with_malloc_and_dequeuer_with_free( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_umm_enqueuing_with_malloc_and_dequeuing_with_free( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_umm_element
+ dummy_element;
+
+ struct lfds710_queue_umm_state
+ qs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : one thread per logical core
+ each thread loops for the test duration (TEST_DURATION_IN_SECONDS)
+ mallocs and enqueues 1k elements, then dequeues and frees 1k elements
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds710_queue_umm_init_valid_on_current_logical_core( &qs, &dummy_element, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qs = &qs;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_enqueuer_with_malloc_and_dequeuer_with_free, &tpts[loop] );
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = 0;
+
+ lfds710_queue_umm_query( &qs, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ lfds710_queue_umm_cleanup( &qs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_enqueuer_with_malloc_and_dequeuer_with_free( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop,
+ time_loop = 0;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ qe = libtest_misc_aligned_malloc( sizeof(struct lfds710_queue_umm_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ lfds710_queue_umm_enqueue( tpts->qs, qe );
+ }
+
+ for( loop = 0 ; loop < 1000 ; loop++ )
+ {
+ lfds710_queue_umm_dequeue( tpts->qs, &qe );
+ libtest_misc_aligned_free( qe );
+ }
+
+ if( time_loop++ == REDUCED_TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ counter,
+ thread_number;
+
+ struct lfds710_queue_umm_state
+ *qs;
+};
+
+struct test_element
+{
+ struct lfds710_queue_umm_element
+ qe,
+ *qe_use;
+
+ lfds710_pal_uint_t
+ counter,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_queue_umm_rapid_enqueuing_and_dequeuing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_queue_umm_element LFDS710_PAL_ALIGN(LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES)
+ qe_dummy;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct lfds710_queue_umm_state
+ qs;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array,
+ *te;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single queue with 10,000 elements
+ we don't want too many elements, so we ensure plenty of element re-use
+ each thread simply loops dequeuing and enqueuing
+ where the user data indicates thread number and an incrementing counter
+ verification is that the counter increments on a per-thread basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_element) * 10000 * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ vi.min_elements = vi.max_elements = 10000;
+
+ lfds710_queue_umm_init_valid_on_current_logical_core( &qs, &qe_dummy, NULL );
+
+ // TRD : we assume the test will iterate at least once (or we'll have a false negative)
+ for( loop = 0 ; loop < 10000 ; loop++ )
+ {
+ (te_array+loop)->thread_number = loop;
+ (te_array+loop)->counter = 0;
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( (te_array+loop)->qe, te_array+loop );
+ lfds710_queue_umm_enqueue( &qs, &(te_array+loop)->qe );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->qs = &qs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->counter = 0;
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_rapid_enqueuer_and_dequeuer, &tpts[loop] );
+ loop++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_queue_umm_query( &qs, LFDS710_QUEUE_UMM_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ // TRD : now check results
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
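+ // TRD : the queue is FIFO per-thread, so for each writer thread the counters must come back in strictly increasing order
+ // TRD : the first counter seen for a thread seeds its expected value; a larger counter then means a lost element, a smaller one means a duplicate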
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_queue_umm_dequeue(&qs, &qe) )
+ {
+ te = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qe );
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( per_thread_counters[te->thread_number] == 0 )
+ per_thread_counters[te->thread_number] = te->counter;
+
+ if( te->counter > per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->counter < per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->counter == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]++;
+ }
+
+ lfds710_queue_umm_cleanup( &qs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_enqueuer_and_dequeuer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct lfds710_queue_umm_element
+ *qe;
+
+ struct test_element
+ *te;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
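+ // TRD : busy loop - dequeue an element, stamp it with this thread's number and next counter value, then enqueue it again
+ // TRD : time() is only called every TIME_LOOP_COUNT iterations, so the hot loop is not dominated by the time() call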
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds710_queue_umm_dequeue( tpts->qs, &qe );
+ te = LFDS710_QUEUE_UMM_GET_VALUE_FROM_ELEMENT( *qe );
+
+ te->thread_number = tpts->thread_number;
+ te->counter = tpts->counter++;
+
+ LFDS710_QUEUE_UMM_SET_VALUE_IN_ELEMENT( *qe, te );
+ lfds710_queue_umm_enqueue( tpts->qs, qe );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ lfds710_pal_uint_t
+ read_count;
+
+ struct lfds710_ringbuffer_state
+ *rs;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_reader( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_ringbuffer_reading( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum lfds710_misc_validity
+ local_dvs[2] = { LFDS710_MISC_VALIDITY_VALID, LFDS710_MISC_VALIDITY_VALID };
+
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ total_read = 0;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_ringbuffer_element
+ *re_array;
+
+ struct lfds710_ringbuffer_state
+ rs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single ringbuffer
+ with as many elements as memory allows
+ we populate the ringbuffer, where the
+ user data is an incrementing counter
+
+ we create one thread per CPU
+ where each thread busy-works,
+ reading until the ringbuffer is empty
+
+ each thread keeps track of the number of reads it manages
+ and checks that each user data it reads is greater than the
+ previous user data that was read
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ re_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct lfds710_ringbuffer_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
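+ // TRD : the test only ever holds number_elements-1 user entries (the write loop below starts from 1), hence the -1 in max_elements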
+ vi.min_elements = 0;
+ vi.max_elements = number_elements - 1;
+
+ lfds710_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements, NULL );
+
+ // TRD : init the ringbuffer contents for the test
+ for( loop = 1 ; loop < number_elements ; loop++ )
+ lfds710_ringbuffer_write( &rs, NULL, (void *) (lfds710_pal_uint_t) loop, NULL, NULL, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->rs = &rs;
+ (tpts+loop)->read_count = 0;
+ (tpts+loop)->error_flag = LOWERED;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_simple_reader, &tpts[loop] );
+
+ loop++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_ringbuffer_query( &rs, LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) local_dvs );
+
+ if( local_dvs[0] != LFDS710_MISC_VALIDITY_VALID )
+ *dvs = local_dvs[0];
+
+ if( local_dvs[1] != LFDS710_MISC_VALIDITY_VALID )
+ *dvs = local_dvs[1];
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ {
+ // TRD : check for raised error flags
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : check the total number of reads across all threads equals the number of elements written
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ total_read += (tpts+loop)->read_count;
+
+ if( total_read < number_elements - 1 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( total_read > number_elements - 1 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+ }
+
+ lfds710_ringbuffer_cleanup( &rs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_reader( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ *prev_value,
+ *value;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
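+ // TRD : perform one read before the test proper begins, to seed prev_value for the ordering check in the main loop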
+ lfds710_ringbuffer_read( tpts->rs, NULL, (void **) &prev_value );
+ tpts->read_count++;
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( lfds710_ringbuffer_read(tpts->rs, NULL, (void **) &value) )
+ {
+ if( value <= prev_value )
+ tpts->error_flag = RAISED;
+
+ prev_value = value;
+
+ tpts->read_count++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ enum flag
+ error_flag;
+
+ lfds710_pal_uint_t
+ counter,
+ number_logical_processors,
+ *per_thread_counters,
+ thread_number;
+
+ struct lfds710_ringbuffer_state
+ *rs;
+};
+
+struct test_element
+{
+ lfds710_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_reader_writer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_ringbuffer_reading_and_writing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum lfds710_misc_validity
+ local_dvs[2] = { LFDS710_MISC_VALIDITY_VALID, LFDS710_MISC_VALIDITY_VALID };
+
+ lfds710_pal_uint_t
+ *counters,
+ loop,
+ number_elements,
+ number_logical_processors,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_ringbuffer_element
+ *re_array;
+
+ struct lfds710_ringbuffer_state
+ rs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single ringbuffer
+ with as many elements as memory allows
+ and populate it before the threads start
+
+ we create one thread per CPU
+ where each thread busy-works reading
+ and then immediately writing
+ for ten seconds
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ while a thread runs, it keeps track of the
+ counters for the other threads and throws an error
+ if it sees a number stay the same or decrease
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
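+ // TRD : a single allocation holds both arrays - the test elements sit immediately after the ringbuffer elements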
+ re_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element) + sizeof(struct lfds710_ringbuffer_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+ te_array = (struct test_element *) ( re_array + number_elements );
+
+ vi.min_elements = 0;
+ vi.max_elements = number_elements;
+
+ lfds710_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements, NULL );
+
+ // TRD : populate the ringbuffer
+ for( loop = 1 ; loop < number_elements ; loop++ )
+ {
+ te_array[loop].thread_number = 0;
+ te_array[loop].datum = (lfds710_pal_uint_t) -1 ;
+ lfds710_ringbuffer_write( &rs, NULL, &te_array[loop], NULL, NULL, NULL );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->rs = &rs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->counter = 0;
+ (tpts+loop)->number_logical_processors = number_logical_processors;
+ (tpts+loop)->error_flag = LOWERED;
+ (tpts+loop)->per_thread_counters = counters + loop * number_logical_processors;
+
+ for( subloop = 0 ; subloop < number_logical_processors ; subloop++ )
+ *((tpts+loop)->per_thread_counters+subloop) = 0;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_reader_writer, &tpts[loop] );
+
+ loop++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_ringbuffer_query( &rs, LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) local_dvs );
+
+ if( local_dvs[0] != LFDS710_MISC_VALIDITY_VALID )
+ *dvs = local_dvs[0];
+
+ if( local_dvs[1] != LFDS710_MISC_VALIDITY_VALID )
+ *dvs = local_dvs[1];
+
+ if( *dvs == LFDS710_MISC_VALIDITY_VALID )
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ if( (tpts+loop)->error_flag == RAISED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ lfds710_ringbuffer_cleanup( &rs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_reader_writer( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct test_element
+ *te;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds710_ringbuffer_read( tpts->rs, NULL, (void **) &te );
+
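+ // TRD : per_thread_counters[n] holds one more than the highest datum yet seen from thread n, so a smaller datum means a stale or duplicated element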
+ if( te->thread_number >= tpts->number_logical_processors )
+ tpts->error_flag = RAISED;
+ else
+ {
+ if( te->datum < tpts->per_thread_counters[te->thread_number] )
+ tpts->error_flag = RAISED;
+
+ if( te->datum >= tpts->per_thread_counters[te->thread_number] )
+ tpts->per_thread_counters[te->thread_number] = te->datum+1;
+ }
+
+ te->thread_number = tpts->thread_number;
+ te->datum = tpts->counter++;
+
+ lfds710_ringbuffer_write( tpts->rs, NULL, te, NULL, NULL, NULL );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ lfds710_pal_uint_t
+ thread_number,
+ datum;
+};
+
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ thread_number,
+ write_count;
+
+ struct test_element
+ te;
+
+ struct lfds710_ringbuffer_state
+ *rs;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_writer( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_ringbuffer_writing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ enum lfds710_misc_validity
+ local_dvs[2] = { LFDS710_MISC_VALIDITY_VALID, LFDS710_MISC_VALIDITY_VALID };
+
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_ringbuffer_element
+ *re_array;
+
+ struct lfds710_ringbuffer_state
+ rs;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te,
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a single ringbuffer
+ with n elements
+ we create n test elements
+ which are thread_number/counter pairs
+ init them to safe values
+ and fully populate the ringbuffer
+
+ we create one thread per CPU
+ where each thread busy-works writing
+ for ten seconds; each thread has one extra element
+ which it uses for the first write and after that
+ it uses the element it picks up from overwriting
+
+ the user data in each written element is a combination
+ of the thread number and the counter
+
+ after the threads are complete, we validate by
+ checking the user data counters increment on a per thread
+ basis
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+
+ // TRD : allocate
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ re_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element) + sizeof(struct lfds710_ringbuffer_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+ te_array = (struct test_element *) ( re_array + number_elements );
+
+ vi.min_elements = number_elements;
+ vi.max_elements = number_elements;
+
+ lfds710_ringbuffer_init_valid_on_current_logical_core( &rs, re_array, number_elements, NULL );
+
+ // TRD : init the test elements and write them into the ringbuffer
+ for( loop = 1 ; loop < number_elements ; loop++ )
+ {
+ te_array[loop].thread_number = 0;
+ te_array[loop].datum = 0;
+ lfds710_ringbuffer_write( &rs, NULL, &te_array[loop], NULL, NULL, NULL );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ (tpts+loop)->rs = &rs;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->write_count = 0;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_simple_writer, &tpts[loop] );
+
+ loop++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ // TRD : now check results
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = 0;
+
+ lfds710_ringbuffer_query( &rs, LFDS710_RINGBUFFER_QUERY_SINGLETHREADED_VALIDATE, &vi, (void *) local_dvs );
+
+ if( local_dvs[0] != LFDS710_MISC_VALIDITY_VALID )
+ *dvs = local_dvs[0];
+
+ if( local_dvs[1] != LFDS710_MISC_VALIDITY_VALID )
+ *dvs = local_dvs[1];
+
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_ringbuffer_read(&rs, NULL, (void **) &te) )
+ {
+ if( te->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( per_thread_counters[te->thread_number] == 0 )
+ per_thread_counters[te->thread_number] = te->datum;
+
+ if( te->datum < per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->datum >= per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number] = te->datum+1;
+ }
+
+ lfds710_ringbuffer_cleanup( &rs, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_simple_writer( void *libtest_threadset_per_thread_state )
+{
+ enum lfds710_misc_flag
+ overwrite_occurred_flag;
+
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_element
+ *te;
+
+ struct test_per_thread_state
+ *tpts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
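+ // TRD : the first write uses this thread's spare element; the write hands back the user data of the element it overwrote, which is then re-used for every subsequent write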
+ tpts->te.thread_number = 0;
+ tpts->te.datum = 0;
+
+ lfds710_ringbuffer_write( tpts->rs, NULL, &tpts->te, &overwrite_occurred_flag, NULL, (void **) &te );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ te->thread_number = tpts->thread_number;
+ te->datum = tpts->write_count++;
+
+ lfds710_ringbuffer_write( tpts->rs, NULL, te, &overwrite_occurred_flag, NULL, (void **) &te );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 4127 ) // TRD : disables MSVC warnings for unreferenced formal parameters and constant conditional expressions
+
+void libtest_tests_stack_alignment( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ /* TRD : these are compile time checks
+ but we do them here because this is a test programme
+ and it should indicate issues to users when it is *run*,
+ not when it is compiled, because a compile error normally
+ indicates a problem with the code itself and so is misleading
+ */
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : struct lfds710_stack_state
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_stack_state,top) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ if( LIBTEST_MISC_OFFSETOF(struct lfds710_stack_state,user_state) % LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES != 0 )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ return;
+}
+
+#pragma warning( default : 4100 4127 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_stack_state
+ *ss;
+};
+
+struct test_element
+{
+ struct lfds710_stack_element
+ se;
+
+ enum flag
+ popped_flag;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_stack_popping( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_elements,
+ number_logical_processors;
+
+ struct lfds710_stack_state
+ ss;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_misc_validation_info
+ vi = { 0, 0 };
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create a stack with as many elements as possible
+
+ the creation function runs in a single thread and creates
+ and pushes those elements onto the stack
+
+ each element contains a void pointer to the container test element
+
+ we then run one thread per CPU
+ where each thread loops, popping as quickly as possible
+ each test element has a flag which indicates it has been popped
+
+ the threads run till the source stack is empty
+
+ we then check the test elements
+ every element should have been popped
+
+ then tidy up
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ lfds710_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ {
+ (te_array+loop)->popped_flag = LOWERED;
+ LFDS710_STACK_SET_VALUE_IN_ELEMENT( (te_array+loop)->se, te_array+loop );
+ lfds710_stack_push( &ss, &(te_array+loop)->se );
+ }
+
+ libtest_threadset_init( &ts, NULL );
+
+ loop = 0;
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ tpts[loop].ss = &ss;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_popping, &tpts[loop] );
+
+ loop++;
+ }
+
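+ // TRD : ensure the fully populated stack is visible to the worker threads before they begin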
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ lfds710_stack_query( &ss, LFDS710_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ // TRD : now we check each element has popped_flag set to RAISED
+ for( loop = 0 ; loop < number_elements ; loop++ )
+ if( (te_array+loop)->popped_flag == LOWERED )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+
+ // TRD : cleanup
+ lfds710_stack_cleanup( &ss, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping( void *libtest_threadset_per_thread_state )
+{
+ struct lfds710_stack_element
+ *se;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_element
+ *te;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ while( lfds710_stack_pop(tpts->ss, &se) )
+ {
+ te = LFDS710_STACK_GET_VALUE_FROM_ELEMENT( *se );
+ te->popped_flag = RAISED;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_element
+{
+ struct lfds710_stack_element
+ se,
+ thread_local_se;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+struct test_per_thread_state
+{
+ struct lfds710_stack_state
+ ss_thread_local,
+ *ss;
+
+ lfds710_pal_uint_t
+ number_elements_per_thread;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *libtest_threadset_per_thread_state );
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_stack_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ subloop;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_stack_state
+ ss;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we have two threads per CPU
+ the threads loop for ten seconds
+ the first thread pushes 10000 elements then pops 10000 elements
+ the second thread pops 10000 elements then pushes 10000 elements
+ all pushes and pops go onto the single main stack
+ with a per-thread local stack to store the pops
+
+ after time is up, all threads push what they have remaining onto
+ the main stack
+
+ we then validate the main stack
+ */
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors * 2, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / (number_logical_processors * 2);
+
+ lfds710_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ // TRD : half of all elements in the main stack so the popping threads can start immediately
+ for( loop = 0 ; loop < number_elements_per_thread * number_logical_processors ; loop++ )
+ {
+ (te_array+loop)->datum = loop;
+ LFDS710_STACK_SET_VALUE_IN_ELEMENT( (te_array+loop)->se, te_array+loop );
+ lfds710_stack_push( &ss, &(te_array+loop)->se );
+ }
+
+ loop = 0;
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ // TRD : first set of threads (poppers)
+ (tpts+loop)->ss = &ss;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ lfds710_stack_init_valid_on_current_logical_core( &(tpts+loop)->ss_thread_local, NULL );
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_popping_and_pushing_start_popping, &tpts[loop] );
+
+ // TRD : second set of threads (pushers - who need elements in their per-thread stacks)
+ (tpts+loop+number_logical_processors)->ss = &ss;
+ (tpts+loop+number_logical_processors)->number_elements_per_thread = number_elements_per_thread;
+ lfds710_stack_init_valid_on_current_logical_core( &(tpts+loop+number_logical_processors)->ss_thread_local, NULL );
+ libtest_threadset_add_thread( &ts, &pts[loop+number_logical_processors], lp, thread_popping_and_pushing_start_pushing, &tpts[loop+number_logical_processors] );
+
+ for( subloop = number_elements_per_thread * (number_logical_processors + loop) ; subloop < number_elements_per_thread * (number_logical_processors + loop + 1) ; subloop++ )
+ {
+ LFDS710_STACK_SET_VALUE_IN_ELEMENT( (te_array+subloop)->thread_local_se, (te_array+subloop) );
+ lfds710_stack_push( &(tpts+loop+number_logical_processors)->ss_thread_local, &(te_array+subloop)->thread_local_se );
+ }
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors * 2;
+
+ lfds710_stack_query( &ss, LFDS710_STACK_QUERY_SINGLETHREADED_VALIDATE, (void *) &vi, (void *) dvs );
+
+ lfds710_stack_cleanup( &ss, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ lfds710_stack_cleanup( &(tpts+loop)->ss_thread_local, NULL );
+ lfds710_stack_cleanup( &(tpts+loop+number_logical_processors)->ss_thread_local, NULL );
+ }
+
+ return;
+}
+
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_popping( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ count;
+
+ struct lfds710_stack_element
+ *se;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ count = 0;
+
+ while( count < tpts->number_elements_per_thread )
+ if( lfds710_stack_pop(tpts->ss, &se) )
+ {
+ // TRD : we do nothing with the test data, so there's no GET or SET here
+ lfds710_stack_push( &tpts->ss_thread_local, se );
+ count++;
+ }
+
+ // TRD : return our local stack to the main stack
+ while( lfds710_stack_pop(&tpts->ss_thread_local, &se) )
+ lfds710_stack_push( tpts->ss, se );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_popping_and_pushing_start_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ count;
+
+ struct lfds710_stack_element
+ *se;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ time_t
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ start_time = time( NULL );
+
+ while( time(NULL) < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ // TRD : return our local stack to the main stack
+ while( lfds710_stack_pop(&tpts->ss_thread_local, &se) )
+ lfds710_stack_push( tpts->ss, se );
+
+ count = 0;
+
+ while( count < tpts->number_elements_per_thread )
+ if( lfds710_stack_pop(tpts->ss, &se) )
+ {
+ lfds710_stack_push( &tpts->ss_thread_local, se );
+ count++;
+ }
+ }
+
+ // TRD : now push whatever we have in our local stack
+ while( lfds710_stack_pop(&tpts->ss_thread_local, &se) )
+ lfds710_stack_push( tpts->ss, se );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ lfds710_pal_uint_t
+ number_elements_per_thread,
+ thread_number;
+
+ struct lfds710_stack_state
+ *ss;
+
+ struct test_element
+ *te_array;
+};
+
+struct test_element
+{
+ struct lfds710_stack_element
+ se;
+
+ lfds710_pal_uint_t
+ datum,
+ thread_number;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_stack_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ loop = 0,
+ number_elements,
+ number_elements_per_thread,
+ number_logical_processors,
+ *per_thread_counters;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_stack_element
+ *se;
+
+ struct lfds710_stack_state
+ ss;
+
+ struct lfds710_misc_validation_info
+ vi;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te,
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : we create an empty stack
+
+ we then create one thread per CPU, where each thread
+ pushes 100,000 elements each as quickly as possible to the stack
+
+ the data pushed is a counter and a thread ID
+
+ the threads exit when the stack is full
+
+ we then validate the stack;
+
+ checking that the counts increment on a per unique ID basis
+ and that the number of elements we pop equals 100,000 per thread
+ (since each element has an incrementing counter which is
+ unique on a per unique ID basis, we can know we didn't lose
+ any elements)
+ */
+
+ *dvs = LFDS710_MISC_VALIDITY_VALID;
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ per_thread_counters = libshared_memory_alloc_from_unknown_node( ms, sizeof(lfds710_pal_uint_t) * number_logical_processors, sizeof(lfds710_pal_uint_t) );
+ te_array = libshared_memory_alloc_largest_possible_array_from_unknown_node( ms, sizeof(struct test_element), LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES, &number_elements );
+
+ number_elements_per_thread = number_elements / number_logical_processors;
+
+ // TRD : the main stack
+ lfds710_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors, lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+
+ (tpts+loop)->ss = &ss;
+ (tpts+loop)->thread_number = loop;
+ (tpts+loop)->number_elements_per_thread = number_elements_per_thread;
+ (tpts+loop)->te_array = te_array + loop * number_elements_per_thread;
+
+ libtest_threadset_add_thread( &ts, &pts[loop], lp, thread_pushing, &tpts[loop] );
+
+ loop++;
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ *(per_thread_counters+loop) = number_elements_per_thread - 1;
+
+ vi.min_elements = vi.max_elements = number_elements_per_thread * number_logical_processors;
+
+ lfds710_stack_query( &ss, LFDS710_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
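+ // TRD : the stack pops in LIFO order, so each thread's datums must come back in descending order, starting from number_elements_per_thread-1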
+ while( *dvs == LFDS710_MISC_VALIDITY_VALID and lfds710_stack_pop(&ss, &se) )
+ {
+ te = LFDS710_STACK_GET_VALUE_FROM_ELEMENT( *se );
+
+ if( te->thread_number >= number_logical_processors )
+ {
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_TEST_DATA;
+ break;
+ }
+
+ if( te->datum > per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_ADDITIONAL_ELEMENTS;
+
+ if( te->datum < per_thread_counters[te->thread_number] )
+ *dvs = LFDS710_MISC_VALIDITY_INVALID_MISSING_ELEMENTS;
+
+ if( te->datum == per_thread_counters[te->thread_number] )
+ per_thread_counters[te->thread_number]--;
+ }
+
+ lfds710_stack_cleanup( &ss, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ struct test_per_thread_state
+ *tpts;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_MISC_BARRIER_LOAD;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ (tpts->te_array+loop)->thread_number = tpts->thread_number;
+ (tpts->te_array+loop)->datum = loop;
+ }
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ for( loop = 0 ; loop < tpts->number_elements_per_thread ; loop++ )
+ {
+ LFDS710_STACK_SET_VALUE_IN_ELEMENT( (tpts->te_array+loop)->se, tpts->te_array+loop );
+ lfds710_stack_push( tpts->ss, &(tpts->te_array+loop)->se );
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_tests_internal.h"
+
+/***** structs *****/
+struct test_per_thread_state
+{
+ struct lfds710_stack_state
+ *ss;
+};
+
+struct test_element
+{
+ struct lfds710_stack_element
+ se;
+
+ lfds710_pal_uint_t
+ datum;
+};
+
+/***** private prototypes *****/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *libtest_threadset_per_thread_state );
+
+
+
+
+
+/****************************************************************************/
+void libtest_tests_stack_rapid_popping_and_pushing( struct lfds710_list_asu_state *list_of_logical_processors, struct libshared_memory_state *ms, enum lfds710_misc_validity *dvs )
+{
+ lfds710_pal_uint_t
+ index = 0,
+ loop,
+ number_logical_processors;
+
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct lfds710_misc_validation_info
+ vi = { 0, 0 };
+
+ struct lfds710_stack_state
+ ss;
+
+ struct libtest_logical_processor
+ *lp;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct libtest_threadset_state
+ ts;
+
+ struct test_element
+ *te_array;
+
+ struct test_per_thread_state
+ *tpts;
+
+ LFDS710_PAL_ASSERT( list_of_logical_processors != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ LFDS710_PAL_ASSERT( dvs != NULL );
+
+ /* TRD : in these tests there is a fundamental antagonism between
+ how much checking/memory clean-up we do and the
+ likelihood of collisions between threads in their lock-free
+ operations
+
+ the lock-free operations are very quick; if we do anything
+ much at all between operations, we greatly reduce the chance
+ of threads colliding
+
+ so we have some tests which do enough checking/clean up that
+ they can tell the stack is valid and don't leak memory
+ and here, this test now is one of those which does minimal
+ checking - in fact, the nature of the test is that you can't
+ do any real checking - but goes very quickly
+
+ what we do is create a small stack and then run one thread
+ per CPU, where each thread simply pushes and then immediately
+ pops
+
+ the test runs for ten seconds
+
+ after the test is done, the only check we do is to traverse
+ the stack, checking for loops and ensuring the number of
+ elements is correct
+ */
+
+ // TRD : allocate
+ lfds710_list_asu_query( list_of_logical_processors, LFDS710_LIST_ASU_QUERY_GET_POTENTIALLY_INACCURATE_COUNT, NULL, (void **) &number_logical_processors );
+ tpts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ pts = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct libtest_threadset_per_thread_state) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+ te_array = libshared_memory_alloc_from_unknown_node( ms, sizeof(struct test_element) * number_logical_processors, LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES );
+
+ lfds710_stack_init_valid_on_current_logical_core( &ss, NULL );
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ tpts[loop].ss = &ss;
+
+ for( loop = 0 ; loop < number_logical_processors ; loop++ )
+ {
+ LFDS710_STACK_SET_VALUE_IN_ELEMENT( te_array[loop].se, &te_array[loop] );
+ lfds710_stack_push( &ss, &te_array[loop].se );
+ }
+
+ // TRD : get the threads ready
+ libtest_threadset_init( &ts, NULL );
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(*list_of_logical_processors,lasue) )
+ {
+ lp = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libtest_threadset_add_thread( &ts, &pts[index], lp, thread_rapid_popping_and_pushing, &tpts[index] );
+ index++;
+ }
+
+ // TRD : run the test
+ libtest_threadset_run( &ts );
+
+ libtest_threadset_cleanup( &ts );
+
+ // TRD : validate
+ LFDS710_MISC_BARRIER_LOAD;
+
+ vi.min_elements = vi.max_elements = number_logical_processors;
+
+ lfds710_stack_query( &ss, LFDS710_STACK_QUERY_SINGLETHREADED_VALIDATE, &vi, dvs );
+
+ lfds710_stack_cleanup( &ss, NULL );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+static libshared_pal_thread_return_t LIBSHARED_PAL_THREAD_CALLING_CONVENTION thread_rapid_popping_and_pushing( void *libtest_threadset_per_thread_state )
+{
+ lfds710_pal_uint_t
+ time_loop = 0;
+
+ struct lfds710_stack_element
+ *se;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ struct test_per_thread_state
+ *tpts;
+
+ time_t
+ current_time,
+ start_time;
+
+ LFDS710_MISC_MAKE_VALID_ON_CURRENT_LOGICAL_CORE_INITS_COMPLETED_BEFORE_NOW_ON_ANY_OTHER_LOGICAL_CORE;
+
+ LFDS710_PAL_ASSERT( libtest_threadset_per_thread_state != NULL );
+
+ pts = (struct libtest_threadset_per_thread_state *) libtest_threadset_per_thread_state;
+ tpts = LIBTEST_THREADSET_GET_USER_STATE_FROM_PER_THREAD_STATE( *pts );
+
+ libtest_threadset_thread_ready_and_wait( pts );
+
+ current_time = start_time = time( NULL );
+
+ while( current_time < start_time + TEST_DURATION_IN_SECONDS )
+ {
+ lfds710_stack_pop( tpts->ss, &se );
+ lfds710_stack_push( tpts->ss, se );
+
+ if( time_loop++ == TIME_LOOP_COUNT )
+ {
+ time_loop = 0;
+ time( &current_time );
+ }
+ }
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ return LIBSHARED_PAL_THREAD_RETURN_CAST(RETURN_SUCCESS);
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_testsuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_testsuite_cleanup( struct libtest_testsuite_state *ts )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+
+ lfds710_list_asu_cleanup( &ts->list_of_logical_processors, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_testsuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4127 )
+
+void libtest_testsuite_init( struct libtest_testsuite_state *ts,
+ struct libshared_memory_state *ms,
+ void (*callback_test_start)(char *test_name),
+ void (*callback_test_finish)(char *result) )
+{
+ enum libtest_test_id
+ test_id;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( ms != NULL );
+ // TRD : callback_test_start can be NULL
+ // TRD : callback_test_finish can be NULL
+
+ // TRD : configure the testsuite state with all the tests supported by this platform
+
+ libtest_pal_get_full_logical_processor_set( &ts->list_of_logical_processors, ms );
+ ts->ms = ms;
+ ts->callback_test_start = callback_test_start;
+ ts->callback_test_finish = callback_test_finish;
+
+ for( test_id = 0 ; test_id < LIBTEST_TEST_ID_COUNT ; test_id++ )
+ ts->test_available_flag[test_id] = LOWERED;
+
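+ // TRD : tests are registered in groups, keyed on the atomic operations the platform supports - add, barriers, CAS and DWCAS respectively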
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_ADD )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_PRNG_ALIGNMENT], "PRNG alignment", LIBTEST_TEST_ID_PRNG_ALIGNMENT, libtest_tests_prng_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_PRNG_GENERATE], "PRNG generation", LIBTEST_TEST_ID_PRNG_GENERATE, libtest_tests_prng_generate );
+ ts->test_available_flag[LIBTEST_TEST_ID_PRNG_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_PRNG_GENERATE] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BSS_DEQUEUING], "Queue (bounded, single producer, single consumer) dequeuing", LIBTEST_TEST_ID_QUEUE_BSS_DEQUEUING, libtest_tests_queue_bss_dequeuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING], "Queue (bounded, single producer, single consumer) enqueuing", LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING, libtest_tests_queue_bss_enqueuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING_AND_DEQUEUING], "Queue (bounded, single producer, single consumer) enqueuing and dequeuing", LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING_AND_DEQUEUING, libtest_tests_queue_bss_enqueuing_and_dequeuing );
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BSS_DEQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BSS_ENQUEUING_AND_DEQUEUING] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_CAS )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_ALIGNMENT], "BTree (addonly, unbalanced) alignment", LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_ALIGNMENT, libtest_tests_btree_au_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL], "BTree (addonly, unbalanced) adds and walking (fail on existing key)", LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL, libtest_tests_btree_au_random_adds_fail_on_existing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL_AND_OVERWRITE], "BTree (addonly, unbalanced) adds and walking (overwrite on existing key)", LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL_AND_OVERWRITE, libtest_tests_btree_au_random_adds_overwrite_on_existing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_OVERWRITE], "BTree (addonly, unbalanced) fail and overwrite on existing key", LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_OVERWRITE, libtest_tests_btree_au_fail_and_overwrite_on_existing_key );
+ ts->test_available_flag[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_FAIL_AND_OVERWRITE] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_BTREE_ADDONLY_UNBALANCED_RANDOM_ADDS_OVERWRITE] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_HASH_ADDONLY_ALIGNMENT], "Hash (addonly) alignment", LIBTEST_TEST_ID_HASH_ADDONLY_ALIGNMENT, libtest_tests_hash_a_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_HASH_ADDONLY_FAIL_AND_OVERWRITE], "Hash (addonly) fail and overwrite", LIBTEST_TEST_ID_HASH_ADDONLY_FAIL_AND_OVERWRITE, libtest_tests_hash_a_fail_and_overwrite_on_existing_key );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_FAIL], "Hash (addonly) random adds (fail on existing key)", LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_FAIL, libtest_tests_hash_a_random_adds_fail_on_existing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_OVERWRITE], "Hash (addonly) random adds (overwrite on existing key)", LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_OVERWRITE, libtest_tests_hash_a_random_adds_overwrite_on_existing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_HASH_ADDONLY_ITERATE], "Hash (addonly) iterate", LIBTEST_TEST_ID_HASH_ADDONLY_ITERATE, libtest_tests_hash_a_iterate );
+ ts->test_available_flag[LIBTEST_TEST_ID_HASH_ADDONLY_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_HASH_ADDONLY_FAIL_AND_OVERWRITE] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_FAIL] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_HASH_ADDONLY_RANDOM_ADDS_OVERWRITE] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_HASH_ADDONLY_ITERATE] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_ALIGNMENT], "List (addonly, ordered, singlylinked) alignment", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_ALIGNMENT, libtest_tests_list_aso_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED], "List (addonly, ordered, singlylinked) new ordered", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED, libtest_tests_list_aso_new_ordered );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED_WITH_CURSOR], "List (addonly, ordered, singlylinked) new ordered with cursor", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED_WITH_CURSOR, libtest_tests_list_aso_new_ordered_with_cursor );
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_ORDERED_NEW_ORDERED_WITH_CURSOR] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_ALIGNMENT], "List (addonly, singlylinked, unordered) alignment", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_ALIGNMENT, libtest_tests_list_asu_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_START], "List (addonly, singlylinked, unordered) new start", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_START, libtest_tests_list_asu_new_start );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_END], "List (addonly, singlylinked, unordered) new end", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_END, libtest_tests_list_asu_new_end );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_AFTER], "List (addonly, singlylinked, unordered) new after", LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_AFTER, libtest_tests_list_asu_new_after );
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_START] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_END] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_LIST_ADDONLY_SINGLYLINKED_UNORDERED_NEW_AFTER] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BMM_ALIGNMENT], "Queue (bounded, many consumer, many producer) alignment", LIBTEST_TEST_ID_QUEUE_BMM_ALIGNMENT, libtest_tests_queue_bmm_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BMM_COUNT], "Queue (bounded, many consumer, many producer) count", LIBTEST_TEST_ID_QUEUE_BMM_COUNT, libtest_tests_queue_bmm_count );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING], "Queue (bounded, many consumer, many producer) enqueuing", LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING, libtest_tests_queue_bmm_enqueuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BMM_DEQUEUING], "Queue (bounded, many consumer, many producer) dequeuing", LIBTEST_TEST_ID_QUEUE_BMM_DEQUEUING, libtest_tests_queue_bmm_dequeuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING_AND_DEQUEUING], "Queue (bounded, many consumer, many producer) enqueuing and dequeuing", LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING_AND_DEQUEUING, libtest_tests_queue_bmm_enqueuing_and_dequeuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_BMM_RAPID_ENQUEUING_AND_DEQUEUING], "Queue (bounded, many consumer, many producer) rapid enqueuing and dequeuing", LIBTEST_TEST_ID_QUEUE_BMM_RAPID_ENQUEUING_AND_DEQUEUING, libtest_tests_queue_bmm_rapid_enqueuing_and_dequeuing );
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BMM_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BMM_COUNT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BMM_DEQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BMM_ENQUEUING_AND_DEQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_BMM_RAPID_ENQUEUING_AND_DEQUEUING] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_PROCESSOR_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_ALIGNMENT], "Freelist alignment", LIBTEST_TEST_ID_FREELIST_ALIGNMENT, libtest_tests_freelist_alignment );
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_ALIGNMENT] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_EA_POPPING], "Freelist (with EA) popping", LIBTEST_TEST_ID_FREELIST_EA_POPPING, libtest_tests_freelist_ea_popping );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_EA_POPPING_AND_PUSHING], "Freelist (with EA) popping and pushing", LIBTEST_TEST_ID_FREELIST_EA_POPPING_AND_PUSHING, libtest_tests_freelist_ea_popping_and_pushing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_EA_PUSHING], "Freelist (with EA) pushing", LIBTEST_TEST_ID_FREELIST_EA_PUSHING, libtest_tests_freelist_ea_pushing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_EA_RAPID_POPPING_AND_PUSHING], "Freelist (with EA) rapid popping and pushing", LIBTEST_TEST_ID_FREELIST_EA_RAPID_POPPING_AND_PUSHING, libtest_tests_freelist_ea_rapid_popping_and_pushing );
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_EA_POPPING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_EA_POPPING_AND_PUSHING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_EA_PUSHING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_EA_RAPID_POPPING_AND_PUSHING] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING], "Freelist (without EA) popping", LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING, libtest_tests_freelist_without_ea_popping );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING_AND_PUSHING], "Freelist (without EA) popping and pushing", LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING_AND_PUSHING, libtest_tests_freelist_without_ea_popping_and_pushing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_PUSHING], "Freelist (without EA) pushing", LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_PUSHING, libtest_tests_freelist_without_ea_pushing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_RAPID_POPPING_AND_PUSHING], "Freelist (without EA) rapid popping and pushing", LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_RAPID_POPPING_AND_PUSHING, libtest_tests_freelist_without_ea_rapid_popping_and_pushing );
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_POPPING_AND_PUSHING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_PUSHING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_FREELIST_WITHOUT_EA_RAPID_POPPING_AND_PUSHING] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_RINGBUFFER_READING], "Ringbuffer reading", LIBTEST_TEST_ID_RINGBUFFER_READING, libtest_tests_ringbuffer_reading );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_RINGBUFFER_WRITING], "Ringbuffer writing", LIBTEST_TEST_ID_RINGBUFFER_WRITING, libtest_tests_ringbuffer_writing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_RINGBUFFER_READING_AND_WRITING], "Ringbuffer reading and writing", LIBTEST_TEST_ID_RINGBUFFER_READING_AND_WRITING, libtest_tests_ringbuffer_reading_and_writing );
+ ts->test_available_flag[LIBTEST_TEST_ID_RINGBUFFER_READING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_RINGBUFFER_WRITING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_RINGBUFFER_READING_AND_WRITING] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_STACK_ALIGNMENT], "Stack alignment", LIBTEST_TEST_ID_STACK_ALIGNMENT, libtest_tests_stack_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_STACK_POPPING], "Stack popping", LIBTEST_TEST_ID_STACK_POPPING, libtest_tests_stack_popping );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_STACK_POPPING_AND_PUSHING], "Stack popping and pushing", LIBTEST_TEST_ID_STACK_POPPING_AND_PUSHING, libtest_tests_stack_popping_and_pushing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_STACK_PUSHING], "Stack pushing", LIBTEST_TEST_ID_STACK_PUSHING, libtest_tests_stack_pushing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_STACK_RAPID_POPPING_AND_PUSHING], "Stack rapid popping and pushing", LIBTEST_TEST_ID_STACK_RAPID_POPPING_AND_PUSHING, libtest_tests_stack_rapid_popping_and_pushing );
+ ts->test_available_flag[LIBTEST_TEST_ID_STACK_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_STACK_POPPING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_STACK_POPPING_AND_PUSHING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_STACK_PUSHING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_STACK_RAPID_POPPING_AND_PUSHING] = RAISED;
+
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_UMM_ALIGNMENT], "Queue (unbounded, many producer, many consumer) alignment", LIBTEST_TEST_ID_QUEUE_UMM_ALIGNMENT, libtest_tests_queue_umm_alignment );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING], "Queue (unbounded, many producer, many consumer) enqueuing", LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING, libtest_tests_queue_umm_enqueuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_UMM_DEQUEUING], "Queue (unbounded, many producer, many consumer) dequeuing", LIBTEST_TEST_ID_QUEUE_UMM_DEQUEUING, libtest_tests_queue_umm_dequeuing );
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_AND_DEQUEUING], "Queue (unbounded, many producer, many consumer) enqueuing and dequeuing", LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_AND_DEQUEUING, libtest_tests_queue_umm_enqueuing_and_dequeuing );
+ #if( defined LIBTEST_PAL_MALLOC && defined LIBTEST_PAL_FREE )
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_WITH_MALLOC_AND_DEQUEUING_WITH_FREE], "Queue (unbounded, many producer, many consumer) enqueuing with malloc and dequeuing with free", LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_WITH_MALLOC_AND_DEQUEUING_WITH_FREE, libtest_tests_queue_umm_enqueuing_with_malloc_and_dequeuing_with_free );
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_WITH_MALLOC_AND_DEQUEUING_WITH_FREE] = RAISED;
+ #endif
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_QUEUE_UMM_RAPID_ENQUEUING_AND_DEQUEUING], "Queue (unbounded, many producer, many consumer) rapid enqueuing and dequeuing", LIBTEST_TEST_ID_QUEUE_UMM_RAPID_ENQUEUING_AND_DEQUEUING, libtest_tests_queue_umm_rapid_enqueuing_and_dequeuing );
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_UMM_ALIGNMENT] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_UMM_DEQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_UMM_ENQUEUING_AND_DEQUEUING] = RAISED;
+ ts->test_available_flag[LIBTEST_TEST_ID_QUEUE_UMM_RAPID_ENQUEUING_AND_DEQUEUING] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_ADD )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_ADD], "Atomic add", LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_ADD, libtest_tests_pal_atomic_add );
+ ts->test_available_flag[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_ADD] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_CAS )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_CAS], "Atomic CAS", LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_CAS, libtest_tests_pal_atomic_cas );
+ ts->test_available_flag[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_CAS] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_DWCAS )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_DCAS], "Atomic DWCAS", LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_DCAS, libtest_tests_pal_atomic_dwcas );
+ ts->test_available_flag[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_DCAS] = RAISED;
+ }
+
+ if( LFDS710_MISC_ATOMIC_SUPPORT_COMPILER_BARRIERS and LFDS710_MISC_ATOMIC_SUPPORT_EXCHANGE )
+ {
+ libtest_test_init( &ts->tests[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_EXCHANGE], "Atomic exchange", LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_EXCHANGE, libtest_tests_pal_atomic_exchange );
+ ts->test_available_flag[LIBTEST_TEST_ID_PORTING_ABSTRACTION_LAYER_EXCHANGE] = RAISED;
+ }
+
+ return;
+}
+
+#pragma warning( default : 4127 )
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_testsuite_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_testsuite_run( struct libtest_testsuite_state *ts, struct libtest_results_state *rs )
+{
+ enum libtest_test_id
+ test_id;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( rs != NULL );
+
+ for( test_id = 0 ; test_id < LIBTEST_TEST_ID_COUNT ; test_id++ )
+ if( ts->test_available_flag[test_id] == RAISED )
+ {
+ libshared_memory_set_rollback( ts->ms );
+
+ if( ts->callback_test_start != NULL )
+ ts->callback_test_start( ts->tests[test_id].name );
+
+ libtest_test_run( &ts->tests[test_id], &ts->list_of_logical_processors, ts->ms, &rs->dvs[test_id] );
+
+ if( ts->callback_test_finish != NULL )
+ ts->callback_test_finish( libtest_misc_global_validity_names[ rs->dvs[test_id] ] );
+
+ libshared_memory_rollback( ts->ms );
+ }
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_threadset_add_thread( struct libtest_threadset_state *ts,
+ struct libtest_threadset_per_thread_state *pts,
+ struct libtest_logical_processor *lp,
+ libshared_pal_thread_return_t (LIBSHARED_PAL_THREAD_CALLING_CONVENTION *thread_function)( void *thread_user_state ),
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ LFDS710_PAL_ASSERT( pts != NULL );
+ LFDS710_PAL_ASSERT( lp != NULL );
+ LFDS710_PAL_ASSERT( thread_function != NULL );
+ // TRD : user_state can be NULL
+
+ pts->thread_ready_flag = LOWERED;
+ pts->threadset_start_flag = &ts->threadset_start_flag;
+ pts->pti.logical_processor_number = lp->logical_processor_number;
+ pts->pti.windows_processor_group_number = lp->windows_processor_group_number;
+ pts->pti.thread_function = thread_function;
+ pts->ts = ts;
+ pts->pti.thread_argument = pts;
+ pts->user_state = user_state;
+
+ LFDS710_LIST_ASU_SET_VALUE_IN_ELEMENT( pts->lasue, pts );
+ lfds710_list_asu_insert_at_start( &ts->list_of_per_thread_states, &pts->lasue );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "libtest_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void libtest_threadset_cleanup( struct libtest_threadset_state *ts )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+
+ // TRD : we do naaauuuuthin'
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
--- /dev/null
+/***** includes *****/
+#include "libtest_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_threadset_init( struct libtest_threadset_state *ts,
+ void *user_state )
+{
+ LFDS710_PAL_ASSERT( ts != NULL );
+ // TRD : user_state can be NULL
+
+ ts->threadset_start_flag = LOWERED;
+ ts->user_state = user_state;
+
+ lfds710_list_asu_init_valid_on_current_logical_core( &ts->list_of_per_thread_states, NULL );
+
+ return;
+}
+
--- /dev/null
+/***** the library wide include file *****/
+#include "../libtest_internal.h"
+
+/***** private prototypes *****/
+
--- /dev/null
+/***** includes *****/
+#include "libtest_threadset_internal.h"
+
+
+
+
+
+/****************************************************************************/
+void libtest_threadset_run( struct libtest_threadset_state *ts )
+{
+ struct lfds710_list_asu_element
+ *lasue = NULL;
+
+ struct libtest_threadset_per_thread_state
+ *pts;
+
+ LFDS710_PAL_ASSERT( ts != NULL );
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ts->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libshared_pal_thread_start( &pts->thread_handle, &pts->pti );
+ }
+
+ ts->threadset_start_flag = RAISED;
+
+ LFDS710_MISC_BARRIER_STORE;
+
+ lfds710_misc_force_store();
+
+ while( LFDS710_LIST_ASU_GET_START_AND_THEN_NEXT(ts->list_of_per_thread_states,lasue) )
+ {
+ pts = LFDS710_LIST_ASU_GET_VALUE_FROM_ELEMENT( *lasue );
+ libshared_pal_thread_wait( pts->thread_handle );
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void libtest_threadset_thread_ready_and_wait( struct libtest_threadset_per_thread_state *pts )
+{
+ LFDS710_PAL_ASSERT( pts != NULL );
+
+ pts->thread_ready_flag = RAISED;
+
+ LFDS710_MISC_BARRIER_FULL;
+
+ while( *pts->threadset_start_flag == LOWERED )
+ LFDS710_MISC_BARRIER_LOAD;
+
+ return;
+}
+
--- /dev/null
+##### notes #####
+# TRD : -fno-strict-aliasing is needed because GCC has messed up type punning and __may_alias__ does absolutely nothing
+# -Wno-unused-but-set-variable and -Wno-uninitialized are needed because GCC seems confused by the atomic intrinsics
+# the code base for release has been compiled with those warnings enabled, to show any valid errors
+
+##### paths #####
+BINDIR := ../../bin
+OBJDIR := ../../obj
+SRCDIR := ../../src
+LIBINCDIRS := ../../../../liblfds710/inc/ ../../../libtest/inc/ ../../../libshared/inc/
+LIBBINDIRS := ../../../../liblfds710/bin/ ../../../libtest/bin/ ../../../libshared/bin/
+
+##### misc #####
+QUIETLY := 1>/dev/null 2>/dev/null
+NULL :=
+SPACE := $(NULL) # TRD : with a trailing space
+
+##### sources, objects and libraries #####
+BINNAME := test
+BINARY := $(BINDIR)/$(BINNAME)
+SRCDIRS := .
+SOURCES := main.c misc.c callbacks.c util_cmdline.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.o,$(notdir $(SOURCES)))
+DEPENDS := $(patsubst %.o,%.d,$(OBJECTS)) # dependency files emitted by the $(DG) rule; pulled in by "-include $(DEPENDS)" below
+SYSLIBS := -lm -lpthread -lrt
+USRLIBS := -ltest -lshared -llfds710
+LIBINCDIRS := $(subst $(SPACE), -I,$(LIBINCDIRS))
+LIBBINDIRS := $(subst $(SPACE), -L,$(LIBBINDIRS))
+
+##### tools #####
+DG := gcc
+DGFLAGS_MANDATORY := -MM
+DGFLAGS_OPTIONAL :=
+
+CC := gcc
+CFLAGS_MANDATORY := -c -pthread -I$(LIBINCDIRS)
+CFLAGS_OPTIONAL := -Wall -Werror -Wno-unknown-pragmas
+CFLAGS_MANDATORY_COV := -O0 -ggdb -DCOVERAGE -fprofile-arcs -ftest-coverage
+CFLAGS_MANDATORY_DBG := -O0 -ggdb -D_DEBUG
+CFLAGS_MANDATORY_PROF := -O0 -ggdb -DPROF -pg
+CFLAGS_MANDATORY_REL := -O2 -DNDEBUG
+CFLAGS_MANDATORY_TSAN := -O0 -ggdb -DTSAN -fsanitize=thread -fPIC
+
+LD := gcc
+LDFLAGS_MANDATORY := -pthread -L$(LIBBINDIRS)
+LDFLAGS_OPTIONAL := -Wall -Werror
+LDFLAGS_MANDATORY_COV := -O0 -fprofile-arcs -ftest-coverage
+LDFLAGS_MANDATORY_DBG := -O0 -ggdb
+LDFLAGS_MANDATORY_PROF := -O0 -pg
+LDFLAGS_MANDATORY_REL := -O2 -s
+LDFLAGS_MANDATORY_TSAN := -O0 -fsanitize=thread -pie
+
+##### build variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to debug
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring cov,$(MAKECMDGOALS)),cov)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_COV)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_COV)
+ SYSLIBS += -lgcov
+endif
+
+ifeq ($(findstring dbg,$(MAKECMDGOALS)),dbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+endif
+
+ifeq ($(findstring prof,$(MAKECMDGOALS)),prof)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_PROF)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_PROF)
+endif
+
+ifeq ($(findstring rel,$(MAKECMDGOALS)),rel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+endif
+
+ifeq ($(findstring tsan,$(MAKECMDGOALS)),tsan)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_TSAN)
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_TSAN)
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.o : %.c
+ $(DG) $(DGFLAGS_OPTIONAL) $(DGFLAGS) $(DGFLAGS_MANDATORY) $< >$(OBJDIR)/$*.d
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) -o $@ $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) -o $(BINARY) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(OBJECTS) $(USRLIBS) $(SYSLIBS)
+ chmod +x $(BINARY)
+
+##### phony #####
+.PHONY : clean cov dbg prof rel tsan vanilla
+
+clean :
+ @rm -f $(BINDIR)/$(BINNAME) $(OBJDIR)/*.o $(OBJDIR)/*.d $(OBJDIR)/*.gcda $(OBJDIR)/*.gcno
+
+cov : $(BINARY)
+dbg : $(BINARY)
+prof : $(BINARY)
+rel : $(BINARY)
+tsan : $(BINARY)
+vanilla : $(BINARY)
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+##### paths #####
+BINDIR := ..\..\bin
+OBJDIR := ..\..\obj
+SRCDIR := ..\..\src
+
+##### misc #####
+QUIETLY := 1>nul 2>nul
+NULL :=
+SPACE := $(NULL) # TRD : necessary trailing space after the close bracket
+
+##### sources, objects and libraries #####
+BINNAME := test
+BINARY := $(BINDIR)/$(BINNAME).exe
+SRCDIRS := .
+SOURCES := main.c misc.c callbacks.c util_cmdline.c
+OBJECTS := $(patsubst %.c,$(OBJDIR)/%.obj,$(notdir $(SOURCES)))
+SYSLIBS := kernel32.lib
+EXTLIBS :=
+USRLIBS := ../../../../liblfds710/bin/liblfds710.lib ../../../../test_and_benchmark/libshared/bin/libshared.lib ../../../../test_and_benchmark/libtest/bin/libtest.lib
+
+##### default paths fix up #####
+INCLUDE += ;../../../../liblfds710/inc/;../../../../test_and_benchmark/libshared/inc/;../../../../test_and_benchmark/libtest/inc/;
+LIB += ;../../../../liblfds710/bin/;../../../../test_and_benchmark/libshared/bin/;../../../../test_and_benchmark/libtest/bin/;
+
+##### tools #####
+CC := cl
+CFLAGS_MANDATORY :=
+CFLAGS_OPTIONAL := /nologo /W4 /WX /c "/Fd$(BINDIR)\$(BINNAME).pdb" /DUNICODE /D_UNICODE /D_CRT_SECURE_NO_DEPRECATE /DWIN32_LEAN_AND_MEAN
+CFLAGS_MANDATORY_DBG := /Od /Gm /Zi /D_DEBUG
+CFLAGS_MANDATORY_REL := /Ox /DNDEBUG
+
+LD := link
+LDFLAGS_MANDATORY := /nodefaultlib /subsystem:console
+LDFLAGS_OPTIONAL := /nologo /nxcompat /wx
+LDFLAGS_MANDATORY_DBG := /debug "/pdb:$(BINDIR)\$(BINNAME).pdb"
+LDFLAGS_MANDATORY_REL := /incremental:no
+
+##### build variants #####
+ifeq ($(MAKECMDGOALS),) # TRD : default to lib debug
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),libdbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MTd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := libcmtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),librel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MT
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := libcmt.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dlldbg)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_DBG) /MDd
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_DBG)
+ CLIB := msvcrtd.lib
+endif
+
+ifeq ($(MAKECMDGOALS),dllrel)
+ CFLAGS_MANDATORY += $(CFLAGS_MANDATORY_REL) /MD
+ LDFLAGS_MANDATORY += $(LDFLAGS_MANDATORY_REL)
+ CLIB := msvcrt.lib
+endif
+
+##### search paths #####
+vpath %.c $(patsubst %,$(SRCDIR)/%:,$(SRCDIRS))
+
+##### implicit rules #####
+$(OBJDIR)/%.obj : %.c
+ $(CC) $(CFLAGS_OPTIONAL) $(CFLAGS) $(CFLAGS_MANDATORY) "/Fo$@" $<
+
+##### explicit rules #####
+$(BINARY) : $(OBJECTS)
+ $(LD) $(LDFLAGS_OPTIONAL) $(LDFLAGS) $(LDFLAGS_MANDATORY) $(CLIB) $(SYSLIBS) $(EXTLIBS) $(USRLIBS) $(OBJECTS) /out:$(BINARY)
+
+##### phony #####
+.PHONY : clean librel libdbg dllrel dlldbg
+
+clean :
+ @erase /Q $(OBJDIR)\*.obj $(OBJDIR)\*.res $(BINDIR)\$(BINNAME).* $(QUIETLY)
+
+dlldbg : $(BINARY)
+dllrel : $(BINARY)
+libdbg : $(BINARY)
+librel : $(BINARY)
+
+##### dependencies #####
+-include $(DEPENDS)
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void callback_test_start( char *test_name )
+{
+ assert( test_name != NULL );
+
+ printf( "%s...", test_name );
+ fflush( stdout );
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+void callback_test_finish( char *result )
+{
+ assert( result != NULL );
+
+ printf( "%s\n", result );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include <assert.h>
+#include <ctype.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include "../../../liblfds710/inc/liblfds710.h"
+#include "../../libtest/inc/libtest.h"
+#include "util_cmdline.h"
+
+/***** defines *****/
+#define and &&
+#define or ||
+
+#define BITS_PER_BYTE 8
+
+#define NO_FLAGS 0x0
+
+#define TEST_DEFAULT_TEST_MEMORY_IN_MEGABYTES 512
+#define ONE_MEGABYTE_IN_BYTES (1024 * 1024)
+
+#define TEST_VERSION_STRING "7.1.0"
+#define TEST_VERSION_INTEGER 710
+
+#if( defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "kernel-mode"
+#endif
+
+#if( !defined KERNEL_MODE )
+ #define MODE_TYPE_STRING "user-mode"
+#endif
+
+#if( defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "release"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "debug"
+#endif
+
+#if( !defined NDEBUG && defined COVERAGE && !defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "coverage"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && defined TSAN && !defined PROF )
+ #define BUILD_TYPE_STRING "threadsanitizer"
+#endif
+
+#if( !defined NDEBUG && !defined COVERAGE && !defined TSAN && defined PROF )
+ #define BUILD_TYPE_STRING "profiling"
+#endif
+
+/***** enums *****/
+
+/***** structs *****/
+
+/***** externs *****/
+
+/***** prototypes *****/
+int main( int argc, char **argv );
+
+void callback_test_start( char *test_name );
+void callback_test_finish( char *result );
+
+void internal_show_version();
+void internal_logical_core_id_element_cleanup_callback( struct lfds710_list_asu_state *lasus, struct lfds710_list_asu_element *lasue );
+
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+int main( int argc, char **argv )
+{
+ enum flag
+ determine_erg_flag = LOWERED,
+ run_flag = LOWERED,
+ show_error_flag = LOWERED,
+ show_help_flag = LOWERED,
+ show_version_flag = LOWERED;
+
+ int
+ rv;
+
+ lfds710_pal_uint_t
+ loop,
+ iterations = 1,
+ memory_in_megabytes = TEST_DEFAULT_TEST_MEMORY_IN_MEGABYTES;
+
+ struct util_cmdline_state
+ cs;
+
+ union util_cmdline_arg_data
+ *arg_data;
+
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ util_cmdline_init( &cs );
+
+ util_cmdline_add_arg( &cs, 'e', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'h', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'i', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 'm', UTIL_CMDLINE_ARG_TYPE_INTEGER );
+ util_cmdline_add_arg( &cs, 'r', UTIL_CMDLINE_ARG_TYPE_FLAG );
+ util_cmdline_add_arg( &cs, 'v', UTIL_CMDLINE_ARG_TYPE_FLAG );
+
+ rv = util_cmdline_process_args( &cs, argc, argv );
+
+ if( rv == 0 )
+ show_error_flag = RAISED;
+
+ if( rv == 1 )
+ {
+ util_cmdline_get_arg_data( &cs, 'e', &arg_data );
+ if( arg_data != NULL )
+ determine_erg_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'h', &arg_data );
+ if( arg_data != NULL )
+ show_help_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'i', &arg_data );
+ if( arg_data != NULL )
+ {
+ if( arg_data->integer.integer < 1 )
+ {
+ puts( "Number of iterations needs to be 1 or greater." );
+ exit( EXIT_FAILURE );
+ }
+
+ iterations = (lfds710_pal_uint_t) arg_data->integer.integer;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'm', &arg_data );
+ if( arg_data != NULL )
+ {
+ if( arg_data->integer.integer < 32 )
+ {
+ puts( "Memory for tests needs to be 32 or greater." );
+ exit( EXIT_FAILURE );
+ }
+
+ memory_in_megabytes = (lfds710_pal_uint_t) arg_data->integer.integer;
+ }
+
+ util_cmdline_get_arg_data( &cs, 'r', &arg_data );
+ if( arg_data != NULL )
+ run_flag = RAISED;
+
+ util_cmdline_get_arg_data( &cs, 'v', &arg_data );
+ if( arg_data != NULL )
+ show_version_flag = RAISED;
+ }
+
+ util_cmdline_cleanup( &cs );
+
+ if( argc == 1 or (run_flag == LOWERED and show_version_flag == LOWERED) )
+ show_help_flag = RAISED;
+
+ if( show_error_flag == RAISED )
+ {
+ printf( "\nInvalid arguments. Sorry - it's a simple parser, so no clues.\n"
+ "-h or run with no args to see the help text.\n" );
+
+ return EXIT_SUCCESS;
+ }
+
+ if( determine_erg_flag == RAISED )
+ {
+ enum libtest_misc_determine_erg_result
+ der;
+
+ lfds710_pal_uint_t
+ count_array[10],
+ erg_size_in_bytes;
+
+ struct libshared_memory_state
+ ms;
+
+ void
+ *memory;
+
+ memory = malloc( ONE_MEGABYTE_IN_BYTES );
+
+ libshared_memory_init( &ms );
+
+ libshared_memory_add_memory( &ms, memory, ONE_MEGABYTE_IN_BYTES );
+
+ libtest_misc_determine_erg( &ms, &count_array, &der, &erg_size_in_bytes );
+
+ if( der == LIBTEST_MISC_DETERMINE_ERG_RESULT_NOT_SUPPORTED )
+ printf( "Determine ERG not supported on the current platform.\n" );
+ else
+ {
+ printf( "\n"
+ "Results\n"
+ "=======\n"
+ "\n" );
+
+ printf( " ERG length in bytes : Number successful LL/SC ops\n"
+ " =================================================\n" );
+
+ for( loop = 0 ; loop < 10 ; loop++ )
+ printf( " %lu bytes : %llu\n", 1UL << (loop+2), (int long long unsigned) count_array[loop] );
+
+ printf( "\n"
+ "Conclusions\n"
+ "===========\n"
+ "\n" );
+
+ switch( der )
+ {
+ case LIBTEST_MISC_DETERMINE_ERG_RESULT_SUCCESS:
+ printf( " The smallest ERG size with successful results is %llu bytes, which\n"
+ " is therefore hopefully if all has gone well the ERG size.\n"
+ " \n"
+ " In the file 'lfds710_porting_abstraction_layer_processor.h', in all\n"
+ " the sections for ARM, please replace the existing the value of the\n"
+ " #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES to %llu, like so;\n"
+ " \n"
+ " #define LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES %llu\n",
+ (int long long unsigned) erg_size_in_bytes,
+ (int long long unsigned) erg_size_in_bytes,
+ (int long long unsigned) erg_size_in_bytes );
+ break;
+
+ case LIBTEST_MISC_DETERMINE_ERG_RESULT_ONE_PHYSICAL_CORE:
+ printf( " This system has only one physical core, and as such this\n"
+ " code cannot determine the ERG.\n" );
+ break;
+
+ case LIBTEST_MISC_DETERMINE_ERG_RESULT_ONE_PHYSICAL_CORE_OR_NO_LLSC:
+ printf( " The results are indeterminate. Either this system has only one\n"
+ " physical core, and as such this code cannot determine the ERG, or\n"
+ " the system has no support for LL/SC.\n" );
+ break;
+
+ case LIBTEST_MISC_DETERMINE_ERG_RESULT_NO_LLSC:
+ printf( " There appears to be no LL/SC support on the current platform.\n" );
+ break;
+
+ case LIBTEST_MISC_DETERMINE_ERG_RESULT_NOT_SUPPORTED:
+ printf( " Determine ERG not supported on the current platform.\n" );
+ break;
+ }
+
+ printf( "\n"
+ "Explanations\n"
+ "============\n"
+ "\n"
+ " This code is for ARM and works to empirically determine the ERG size,\n"
+ " which is the value which needs to be used for the #define\n"
+ " LFDS710_PAL_ATOMIC_ISOLATION_IN_BYTES in the file\n"
+ " 'lfds710_porting_abstraction_layer_processor.h'.\n"
+ " \n"
+ " It is VERY VERY IMPORTANT to set this value because the default on ARM\n"
+ " is the worst-case, which is 2048 bytes, and this makes all the lfds\n"
+ " data structure structs HUGE.\n"
+ " \n"
+ " If this value is set too small, then absolutely none of the liblfds data\n"
+ " structures should work *at all*, so getting it wrong should be very obvious.\n"
+ " \n"
+ " Each ERG length is tried 1024 times. All ERG sizes which are smaller than the\n"
+ " actual ERG size should have 0 successful ops. All ERG sizes equal to or\n"
+ " greater than the actual ERG size should have 1024, or maybe a few less,\n"
+ " successful ops. A few spurious failures are not unusual, it's the\n"
+ " nature of LL/SC, so it's normal. The correct ERG size then is the smallest\n"
+ " size which has about 1024 successful ops.\n"
+ " \n"
+ " This code however can only work if there are at least two physical cores\n"
+ " in the system. It's not enough to have one physical core with multiple\n"
+ " logical cores. If the ERG size of 4 bytes has any successes, then the\n"
+ " current systems has only a single physical processor, or it has no\n"
+ " support for LL/SC (which can happen - there are SO many ARM system\n"
+ " variants).\n" );
+ }
+
+ return EXIT_SUCCESS;
+ }
+
+ if( show_help_flag == RAISED )
+ {
+ printf( "test -e -h -i [n] -m [n] -r -v\n"
+ " -e : empirically determine Exclusive Reservation Granule\n"
+ " (currently supports only ARM32)\n"
+ " -h : help\n"
+ " -i [n] : number of iterations (default : 1)\n"
+ " -m [n] : memory for tests, in mb (default : %u)\n"
+ " -r : run (causes test to run; present so no args gives help)\n"
+ " -v : version\n", (int unsigned) TEST_DEFAULT_TEST_MEMORY_IN_MEGABYTES );
+
+ return EXIT_SUCCESS;
+ }
+
+ if( show_version_flag == RAISED )
+ {
+ internal_show_version();
+ return EXIT_SUCCESS;
+ }
+
+ if( run_flag == RAISED )
+ {
+ struct libshared_memory_state
+ ms;
+
+ struct libtest_results_state
+ rs;
+
+ struct libtest_testsuite_state
+ ts;
+
+ void
+ *test_memory;
+
+ test_memory = malloc( memory_in_megabytes * ONE_MEGABYTE_IN_BYTES );
+
+ libshared_memory_init( &ms );
+
+ libshared_memory_add_memory( &ms, test_memory, memory_in_megabytes * ONE_MEGABYTE_IN_BYTES );
+
+ libtest_testsuite_init( &ts, &ms, callback_test_start, callback_test_finish );
+
+ for( loop = 0 ; loop < (lfds710_pal_uint_t) iterations ; loop++ )
+ {
+ libtest_results_init( &rs );
+
+ printf( "\n"
+ "Test Iteration %02llu\n"
+ "=================\n", (int long long unsigned) (loop+1) );
+
+ libtest_testsuite_run( &ts, &rs );
+
+ libtest_results_cleanup( &rs );
+ }
+
+ libtest_testsuite_cleanup( &ts );
+
+ libshared_memory_cleanup( &ms, NULL );
+
+ free( test_memory );
+ }
+
+ return EXIT_SUCCESS;
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void internal_show_version()
+{
+ char const
+ *version_and_build_string;
+
+ char static const
+ * const local_build_and_version_string = "test " TEST_VERSION_STRING " (" BUILD_TYPE_STRING ", " MODE_TYPE_STRING ")";
+
+ printf( "%s\n", local_build_and_version_string );
+
+ libshared_misc_query( LIBSHARED_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ libtest_misc_query( LIBTEST_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ lfds710_misc_query( LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+ printf( "%s\n", version_and_build_string );
+
+ return;
+}
+
--- /dev/null
+/***** includes *****/
+#include "internal.h"
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_init( struct util_cmdline_state *cs )
+{
+ lfds710_pal_uint_t
+ loop;
+
+ assert( cs != NULL );
+
+ for( loop = 0 ; loop < NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET ; loop++ )
+ {
+ cs->args[loop].arg_type = UTIL_CMDLINE_ARG_TYPE_UNSET;
+ cs->args[loop].processed_flag = LOWERED;
+ }
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+#pragma warning( disable : 4100 )
+
+void util_cmdline_cleanup( struct util_cmdline_state *cs )
+{
+ assert( cs != NULL );
+
+ return;
+}
+
+#pragma warning( default : 4100 )
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_add_arg( struct util_cmdline_state *cs, char arg_letter, enum util_cmdline_arg_type arg_type )
+{
+ lfds710_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( arg_letter >= 'a' and arg_letter <= 'z' );
+ // TRD : arg_type can be any value in its range
+
+ index = arg_letter - 'a';
+
+ cs->args[index].arg_type = arg_type;
+
+ if( arg_type == UTIL_CMDLINE_ARG_TYPE_FLAG )
+ cs->args[index].arg_data.flag.flag = LOWERED;
+
+ return;
+}
+
+
+
+
+
+/****************************************************************************/
+int util_cmdline_process_args( struct util_cmdline_state *cs, int argc, char **argv )
+{
+ char
+ *arg;
+
+ int
+ arg_letter,
+ cc,
+ loop,
+ rv = 1;
+
+ lfds710_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( argc >= 1 );
+ assert( argv != NULL );
+
+ for( loop = 1 ; loop < argc ; loop++ )
+ {
+ arg = *(argv+loop);
+
+ switch( *arg )
+ {
+ case '-':
+ arg_letter = tolower( *(arg+1) );
+
+ if( arg_letter >= 'a' and arg_letter <= 'z' )
+ {
+ index = arg_letter - 'a';
+
+ switch( cs->args[index].arg_type )
+ {
+ case UTIL_CMDLINE_ARG_TYPE_INTEGER_RANGE:
+ if( loop+1 >= argc )
+ rv = 0;
+
+ if( loop+1 < argc )
+ {
+ cc = sscanf( *(argv+loop+1), "%llu-%llu", &cs->args[index].arg_data.integer_range.integer_start, &cs->args[index].arg_data.integer_range.integer_end );
+
+ if( cc != 2 )
+ rv = 0;
+
+ if( cc == 2 )
+ {
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ }
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_INTEGER:
+ if( loop+1 >= argc )
+ rv = 0;
+
+ if( loop+1 < argc )
+ {
+ cc = sscanf( *(argv+loop+1), "%llu", &cs->args[index].arg_data.integer.integer );
+
+ if( cc != 1 )
+ rv = 0;
+
+ if( cc == 1 )
+ {
+ cs->args[index].processed_flag = RAISED;
+ loop++;
+ }
+ }
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_FLAG:
+ cs->args[index].arg_data.flag.flag = RAISED;
+ cs->args[index].processed_flag = RAISED;
+ break;
+
+ case UTIL_CMDLINE_ARG_TYPE_UNSET:
+ break;
+ }
+ }
+ break;
+
+ default:
+ rv = 0;
+ break;
+ }
+ }
+
+ return rv;
+}
+
+
+
+
+
+/****************************************************************************/
+void util_cmdline_get_arg_data( struct util_cmdline_state *cs, char arg_letter, union util_cmdline_arg_data **arg_data )
+{
+ lfds710_pal_uint_t
+ index;
+
+ assert( cs != NULL );
+ assert( arg_letter >= 'a' and arg_letter <= 'z' );
+ assert( arg_data != NULL );
+
+ index = arg_letter - 'a';
+
+ if( cs->args[index].processed_flag == RAISED )
+ *arg_data = &cs->args[index].arg_data;
+ else
+ *arg_data = NULL;
+
+ return;
+}
+
--- /dev/null
+/***** defines *****/
+#define NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET 26
+
+/***** enums *****/
+enum util_cmdline_arg_type
+{
+ UTIL_CMDLINE_ARG_TYPE_INTEGER_RANGE,
+ UTIL_CMDLINE_ARG_TYPE_INTEGER,
+ UTIL_CMDLINE_ARG_TYPE_FLAG,
+ UTIL_CMDLINE_ARG_TYPE_UNSET
+};
+
+/***** structs *****/
+struct util_cmdline_arg_integer_range
+{
+ int long long unsigned
+ integer_start,
+ integer_end;
+};
+
+struct util_cmdline_arg_integer
+{
+ int long long unsigned
+ integer;
+};
+
+struct util_cmdline_arg_flag
+{
+ enum flag
+ flag;
+};
+
+union util_cmdline_arg_data
+{
+ struct util_cmdline_arg_integer_range
+ integer_range;
+
+ struct util_cmdline_arg_integer
+ integer;
+
+ struct util_cmdline_arg_flag
+ flag;
+};
+
+struct util_cmdline_arg_letter_and_data
+{
+ enum util_cmdline_arg_type
+ arg_type;
+
+ enum flag
+ processed_flag;
+
+ union util_cmdline_arg_data
+ arg_data;
+};
+
+struct util_cmdline_state
+{
+ struct util_cmdline_arg_letter_and_data
+ args[NUMBER_OF_LOWERCASE_LETTERS_IN_LATIN_ALPHABET];
+};
+
+/***** public prototypes *****/
+void util_cmdline_init( struct util_cmdline_state *cs );
+void util_cmdline_cleanup( struct util_cmdline_state *cs );
+void util_cmdline_add_arg( struct util_cmdline_state *cs, char arg_letter, enum util_cmdline_arg_type arg_type );
+int util_cmdline_process_args( struct util_cmdline_state *cs, int argc, char **argv );
+void util_cmdline_get_arg_data( struct util_cmdline_state *cs, char arg_letter, union util_cmdline_arg_data **arg_data );
+
--- /dev/null
+benchmark : command line veneer calling the libbenchmark API
+libbenchmark : the benchmarking code
+libshared : various shared APIs'n'stuff, everyone else depends on this
+libtest : the testing code
+test : command line veneer calling the libtest API
+
+TO TEST : compile libshared, libtest and test.
+
+TO BENCHMARK : compile libshared, libbenchmark and benchmark.
+
+test and benchmark are veneers only - they give a nice command line wrapper
+around libtest and libbenchmark; they contain no test or benchmark code of
+their own. If you want to run test or benchmark on platforms without a
+command line, just compile libshared and libtest or libbenchmark, and call
+the libtest or libbenchmark API directly - they're designed to be zero work
+for the caller (a sketch of a direct call follows below).
+
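+As an illustration of calling the API directly, below is a minimal sketch in
+the spirit of the test veneer's own main.c. It is not a definitive
+implementation; the header names/paths, the 64 megabyte figure and the
+run_tests_once() wrapper are illustrative assumptions - adjust them to your
+build. The call sequence itself (libshared_memory_init/add_memory,
+libtest_testsuite_init, libtest_results_init, libtest_testsuite_run, then the
+cleanups) is the one the veneer uses.
+
+  #include <stdlib.h>
+
+  #include "liblfds710.h"   // path depends on your include directories
+  #include "libshared.h"    // assumed header name for the libshared API
+  #include "libtest.h"
+
+  int run_tests_once( void )   // hypothetical wrapper, not part of libtest
+  {
+    struct libshared_memory_state ms;
+    struct libtest_results_state rs;
+    struct libtest_testsuite_state ts;
+    void *test_memory;
+
+    // 64 MB is an arbitrary figure for the sketch; the veneer defaults to 512 MB
+    test_memory = malloc( 64 * 1024 * 1024 );
+
+    libshared_memory_init( &ms );
+    libshared_memory_add_memory( &ms, test_memory, 64 * 1024 * 1024 );
+
+    // the two callbacks may be NULL; libtest_testsuite_run checks before calling them
+    libtest_testsuite_init( &ts, &ms, NULL, NULL );
+    libtest_results_init( &rs );
+
+    libtest_testsuite_run( &ts, &rs );
+
+    libtest_results_cleanup( &rs );
+    libtest_testsuite_cleanup( &ts );
+    libshared_memory_cleanup( &ms, NULL );
+    free( test_memory );
+
+    return 0;
+  }
+
+On a platform with no command line but with malloc available this is all that
+is needed; any other suitably sized block of memory handed to
+libshared_memory_add_memory should serve equally well.
+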
--- /dev/null
+Versioning is [major version].[minor version].[bugfix version]
+
+REMEMBER : Every version exists in its own name space. You can link and compile against every version concurrently.
+
+NOTE : This is not true for 6.0.0 and 6.1.0, because one public entity was overlooked and not placed in
+ the name space for that version - this was fixed in 6.0.1 and 6.1.1.
+
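+As an illustration of the per-version name spaces: every public symbol in a
+release carries that release's prefix (lfds710_ / LFDS710_ for 7.1.0), so two
+releases can be linked into one binary, and even used from one translation
+unit, without collisions. A minimal sketch, assuming the 7.1.0 header is on
+the include path; an older release's calls would look the same, just with its
+own prefix. The helper function name is hypothetical.
+
+  #include <stdio.h>
+
+  #include "liblfds710.h"
+
+  void show_liblfds_version( void )   // hypothetical helper for the sketch
+  {
+    char const *version_and_build_string;
+
+    // same query as the test veneer's internal_show_version()
+    lfds710_misc_query( LFDS710_MISC_QUERY_GET_BUILD_AND_VERSION_STRING, NULL, (void **) &version_and_build_string );
+
+    printf( "%s\n", version_and_build_string );
+  }
+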