/*
    Copyright 2005-2013 Intel Corporation. All Rights Reserved.

    This file is part of Threading Building Blocks.

    Threading Building Blocks is free software; you can redistribute it
    and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.

    Threading Building Blocks is distributed in the hope that it will be
    useful, but WITHOUT ANY WARRANTY; without even the implied warranty
    of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with Threading Building Blocks; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software
    library without restriction. Specifically, if other files instantiate
    templates or use macros or inline functions from this file, or you compile
    this file and link it with other files to produce an executable, this
    file does not by itself cause the resulting executable to be covered by
    the GNU General Public License. This exception does not however
    invalidate any other reasons why the executable file might be covered by
    the GNU General Public License.
*/

#if !defined(__TBB_machine_H) || defined(__TBB_machine_linux_intel64_H)
#error Do not #include this internal file directly; use public TBB headers instead.
#endif

#define __TBB_machine_linux_intel64_H

#include <stdint.h>
#include "gcc_ia32_common.h"
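/* gcc_ia32_common.h (included above) carries the helpers shared between the
   IA-32 and Intel 64 gcc ports (e.g. pause/backoff and log2 support), so only
   the 64-bit specifics need to be defined in this file. */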

#define __TBB_WORDSIZE 8
#define __TBB_ENDIANNESS __TBB_ENDIAN_LITTLE

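/* Intel 64 has a strongly ordered (TSO-like) memory model: ordinary loads
   already carry acquire semantics and ordinary stores carry release semantics.
   The consistency helpers below therefore only need to stop the *compiler*
   from reordering memory accesses; no fence instruction is emitted. */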
#define __TBB_compiler_fence() __asm__ __volatile__("": : :"memory")
#define __TBB_control_consistency_helper() __TBB_compiler_fence()
#define __TBB_acquire_consistency_helper() __TBB_compiler_fence()
#define __TBB_release_consistency_helper() __TBB_compiler_fence()

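/* The one reordering Intel 64 hardware does permit is a store followed by a
   load from a different location, so a full memory fence needs a real
   instruction: mfence serializes all earlier loads and stores against all
   later ones. The #ifndef leaves the definition overridable by a platform
   variant that supplies its own. */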
#ifndef __TBB_full_memory_fence
#define __TBB_full_memory_fence() __asm__ __volatile__("mfence": : :"memory")
#endif

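/* Expands to the three fundamental read-modify-write primitives for an S-byte
   operand of type T: compare-and-swap (lock cmpxchg), fetch-and-add (lock
   xadd), and fetch-and-store, i.e. atomic exchange (lock xchg; the lock
   prefix is implicit for xchg with a memory operand, so spelling it out is
   redundant but harmless). X is the operand-size suffix appended to the
   mnemonic. The "0" input constraints tie an input to the same register as
   output operand 0 (the accumulator for cmpxchg, as the instruction
   requires), and the "memory" clobber makes each operation a full compiler
   barrier. */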
#define __TBB_MACHINE_DEFINE_ATOMICS(S,T,X)                                          \
static inline T __TBB_machine_cmpswp##S (volatile void *ptr, T value, T comparand ) \
{                                                                                    \
    T result;                                                                        \
                                                                                     \
    __asm__ __volatile__("lock\ncmpxchg" X " %2,%1"                                  \
                          : "=a"(result), "=m"(*(volatile T*)ptr)                    \
                          : "q"(value), "0"(comparand), "m"(*(volatile T*)ptr)       \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \
                                                                                     \
static inline T __TBB_machine_fetchadd##S(volatile void *ptr, T addend)             \
{                                                                                    \
    T result;                                                                        \
    __asm__ __volatile__("lock\nxadd" X " %0,%1"                                     \
                          : "=r"(result),"=m"(*(volatile T*)ptr)                     \
                          : "0"(addend), "m"(*(volatile T*)ptr)                      \
                          : "memory");                                               \
    return result;                                                                   \
}                                                                                    \
                                                                                     \
static inline T __TBB_machine_fetchstore##S(volatile void *ptr, T value)             \
{                                                                                    \
    T result;                                                                        \
    __asm__ __volatile__("lock\nxchg" X " %0,%1"                                     \
                          : "=r"(result),"=m"(*(volatile T*)ptr)                     \
                          : "0"(value), "m"(*(volatile T*)ptr)                       \
                          : "memory");                                               \
    return result;                                                                   \
}

__TBB_MACHINE_DEFINE_ATOMICS(1,int8_t,"")
__TBB_MACHINE_DEFINE_ATOMICS(2,int16_t,"")
__TBB_MACHINE_DEFINE_ATOMICS(4,int32_t,"")
__TBB_MACHINE_DEFINE_ATOMICS(8,int64_t,"q")

#undef __TBB_MACHINE_DEFINE_ATOMICS
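/* Illustrative behavior of the generated primitives (a sketch only, not part
   of the port layer; real clients reach these through the tbb_machine.h and
   tbb::atomic wrappers):

       int64_t counter = 0;
       __TBB_machine_fetchadd8(&counter, 1);      // counter: 0 -> 1, returns 0
       int64_t old = __TBB_machine_cmpswp8(&counter, 5, 1);
       // CAS: counter == 1 matched comparand 1, so counter is now 5
       // and old holds the previous value, 1.
*/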
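/* Atomic bitwise OR/AND on a 64-bit word. Unlike the primitives above, these
   do not return the previous value, so plain lock-prefixed or/and suffice;
   the "memory" clobber again makes each a compiler barrier. */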
static inline void __TBB_machine_or( volatile void *ptr, uint64_t value ) {
    __asm__ __volatile__("lock\norq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory");
}

static inline void __TBB_machine_and( volatile void *ptr, uint64_t value ) {
    __asm__ __volatile__("lock\nandq %1,%0" : "=m"(*(volatile uint64_t*)ptr) : "r"(value), "m"(*(volatile uint64_t*)ptr) : "memory");
}

#define __TBB_AtomicOR(P,V) __TBB_machine_or(P,V)
#define __TBB_AtomicAND(P,V) __TBB_machine_and(P,V)

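/* No fenced or relaxed load/store is defined here; the flags below tell
   tbb_machine.h to synthesize them generically from the primitives above. In
   particular, a sequentially consistent store is implemented through
   fetchstore (lock xchg), which stores and fully fences in one instruction,
   rather than as a plain store followed by mfence. */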
#define __TBB_USE_FETCHSTORE_AS_FULL_FENCED_STORE           1
#define __TBB_USE_GENERIC_HALF_FENCED_LOAD_STORE            1
#define __TBB_USE_GENERIC_RELAXED_LOAD_STORE                1
#define __TBB_USE_GENERIC_SEQUENTIAL_CONSISTENCY_LOAD_STORE 1