1 | /* |
2 | Copyright (c) 2005-2019 Intel Corporation |
3 | |
4 | Licensed under the Apache License, Version 2.0 (the "License"); |
5 | you may not use this file except in compliance with the License. |
6 | You may obtain a copy of the License at |
7 | |
8 | http://www.apache.org/licenses/LICENSE-2.0 |
9 | |
10 | Unless required by applicable law or agreed to in writing, software |
11 | distributed under the License is distributed on an "AS IS" BASIS, |
12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
13 | See the License for the specific language governing permissions and |
14 | limitations under the License. |
15 | */ |
16 | |
17 | #ifndef harness_tbb_independence_H |
18 | #define harness_tbb_independence_H |
19 | |
// The tests which include tbb/atomic.h gain a dependency on the __TBB_ASSERT
// implementation even if the test does not use anything from it. Almost all
// compilers optimize out unused inline functions and thereby drop the
// dependency, but to be pedantic with the standard the __TBB_ASSERT
// implementation should be provided. Moreover, the offload compiler really
// requires it.
26 | #include "../tbb/tbb_assert_impl.h" |
27 | |
28 | #if __linux__ && __ia64__ |
29 | |
30 | #define __TBB_NO_IMPLICIT_LINKAGE 1 |
31 | #include "tbb/tbb_machine.h" |
32 | |
33 | #include <pthread.h> |
34 | |
35 | // Can't use Intel compiler intrinsic due to internal error reported by 10.1 compiler |
// Serializes the emulated fetch-and-add operations below. Locking and
// unlocking a pthread mutex provides the required full-fence semantics.
pthread_mutex_t counter_mutex = PTHREAD_MUTEX_INITIALIZER;

// Emulated 4-byte atomic fetch-and-add: adds value to the 32-bit integer at
// ptr and returns the value observed there before the addition.
int32_t __TBB_machine_fetchadd4__TBB_full_fence (volatile void *ptr, int32_t value)
{
    pthread_mutex_lock(&counter_mutex);
    int32_t result = *(int32_t*)ptr;
    *(int32_t*)ptr = result + value;
    pthread_mutex_unlock(&counter_mutex);
    return result;
}

// Emulated 8-byte atomic fetch-and-add: adds value to the 64-bit integer at
// ptr and returns the value observed there before the addition.
int64_t __TBB_machine_fetchadd8__TBB_full_fence (volatile void *ptr, int64_t value)
{
    pthread_mutex_lock(&counter_mutex);
    // Fixed: the original accessed the target through int32_t, truncating the
    // 64-bit operand and corrupting its upper 32 bits.
    int64_t result = *(int64_t*)ptr;
    *(int64_t*)ptr = result + value;
    pthread_mutex_unlock(&counter_mutex);
    return result;
}
55 | |
56 | void __TBB_machine_pause(int32_t /*delay*/) { __TBB_Yield(); } |
57 | |
// Serializes the emulated compare-and-swap operation below.
pthread_mutex_t cas_mutex = PTHREAD_MUTEX_INITIALIZER;

// Emulated 8-byte compare-and-swap with full-fence semantics: if the 64-bit
// integer at ptr equals comparand, store value there; in either case return
// the value observed at ptr before any store.
extern "C" int64_t __TBB_machine_cmpswp8__TBB_full_fence(volatile void *ptr, int64_t value, int64_t comparand)
{
    pthread_mutex_lock(&cas_mutex);
    int64_t* const target = (int64_t*)ptr;
    const int64_t observed = *target;
    if (observed == comparand)
        *target = value;
    pthread_mutex_unlock(&cas_mutex);
    return observed;
}
69 | |
// Serializes the emulated fetch-and-store (exchange) operation below.
pthread_mutex_t fetchstore_mutex = PTHREAD_MUTEX_INITIALIZER;

// Emulated 8-byte atomic exchange: stores value into the 64-bit integer at
// ptr and returns the contents observed there beforehand.
int64_t __TBB_machine_fetchstore8__TBB_full_fence (volatile void *ptr, int64_t value)
{
    int64_t previous;
    pthread_mutex_lock(&fetchstore_mutex);
    previous = *(int64_t*)ptr;
    *(int64_t*)ptr = value;
    pthread_mutex_unlock(&fetchstore_mutex);
    return previous;
}
80 | |
#endif /* __linux__ && __ia64__ */
82 | |
83 | #endif // harness_tbb_independence_H |
84 | |