/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_COMPILER_H
#define ZSTD_COMPILER_H

#include "portability_macros.h"

/*-*******************************************************
* Compiler specifics
*********************************************************/
/* force inlining */

#if !defined(ZSTD_NO_INLINE)
#if (defined(__GNUC__) && !defined(__STRICT_ANSI__)) || defined(__cplusplus) || defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
# define INLINE_KEYWORD inline
#else
# define INLINE_KEYWORD
#endif

#if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_INLINE_ATTR __attribute__((always_inline))
#elif defined(_MSC_VER)
# define FORCE_INLINE_ATTR __forceinline
#else
# define FORCE_INLINE_ATTR
#endif

#else

#define INLINE_KEYWORD
#define FORCE_INLINE_ATTR

#endif

/**
 On MSVC, qsort requires that functions passed to it use the __cdecl calling convention (CC).
 This explicitly marks such functions as __cdecl so that the code will still compile
 if a CC other than __cdecl has been made the default.
*/
#if defined(_MSC_VER)
# define WIN_CDECL __cdecl
#else
# define WIN_CDECL
#endif
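
/* Illustrative sketch (not part of this header) : a qsort comparator marked with
 * WIN_CDECL so it keeps the __cdecl CC even if the project's default CC differs.
 * The comparator name and element type below are hypothetical.
 *
 *   static int WIN_CDECL cmpU32(const void* a, const void* b)
 *   {
 *       unsigned const va = *(const unsigned*)a;
 *       unsigned const vb = *(const unsigned*)b;
 *       return (va > vb) - (va < vb);
 *   }
 *   // usage: qsort(table, nbElts, sizeof(unsigned), cmpU32);
 */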

/**
 * FORCE_INLINE_TEMPLATE is used to define C "templates", which take constant
 * parameters. They must be inlined for the compiler to eliminate the constant
 * branches.
 */
#define FORCE_INLINE_TEMPLATE static INLINE_KEYWORD FORCE_INLINE_ATTR
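
/* Illustrative sketch (assumption, not actual zstd code) : a "template" whose
 * last parameter is a literal at every call site; forced inlining lets the
 * compiler fold the constant and drop the dead branch in each specialization.
 *
 *   FORCE_INLINE_TEMPLATE unsigned
 *   popcount_template(unsigned v, int valueIsPow2)   // valueIsPow2 is a literal at each call site
 *   {
 *       if (valueIsPow2) return 1;                   // constant branch, folded away once inlined
 *       {   unsigned n = 0;
 *           while (v) { v &= v - 1; n++; }
 *           return n;
 *       }
 *   }
 *   // popcount_template(x, 0) and popcount_template(x, 1) specialize independently
 */
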
/**
 * HINT_INLINE is used to help the compiler generate better code. It is *not*
 * used for "templates", so it can be tweaked based on the compiler's
 * performance.
 *
 * gcc-4.8 and gcc-4.9 have been shown to benefit from leaving off the
 * always_inline attribute.
 *
 * clang up to 5.0.0 (trunk) benefits tremendously from the always_inline
 * attribute.
 */
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ >= 4 && __GNUC_MINOR__ >= 8 && __GNUC__ < 5
# define HINT_INLINE static INLINE_KEYWORD
#else
# define HINT_INLINE static INLINE_KEYWORD FORCE_INLINE_ATTR
#endif

/* UNUSED_ATTR tells the compiler it is okay if the function is unused. */
#if defined(__GNUC__)
# define UNUSED_ATTR __attribute__((unused))
#else
# define UNUSED_ATTR
#endif

/* force no inlining */
#ifdef _MSC_VER
# define FORCE_NOINLINE static __declspec(noinline)
#else
# if defined(__GNUC__) || defined(__ICCARM__)
# define FORCE_NOINLINE static __attribute__((__noinline__))
# else
# define FORCE_NOINLINE static
# endif
#endif


/* target attribute */
#if defined(__GNUC__) || defined(__ICCARM__)
# define TARGET_ATTRIBUTE(target) __attribute__((__target__(target)))
#else
# define TARGET_ATTRIBUTE(target)
#endif

/* Target attribute for BMI2 dynamic dispatch.
 * Enable lzcnt, bmi, and bmi2.
 * We test for bmi1 & bmi2. lzcnt is included in bmi1.
 */
#define BMI2_TARGET_ATTRIBUTE TARGET_ATTRIBUTE("lzcnt,bmi,bmi2")
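
/* Illustrative sketch (assumption, not actual zstd code) : dynamic dispatch keeps
 * one copy of a hot routine compiled with BMI2_TARGET_ATTRIBUTE and selects it at
 * runtime behind a CPU-feature flag. Function names and the bmi2 flag are hypothetical.
 *
 *   static size_t shift_default(size_t v, unsigned n) { return v >> n; }
 *   BMI2_TARGET_ATTRIBUTE
 *   static size_t shift_bmi2(size_t v, unsigned n) { return v >> n; }   // may lower to SHRX
 *
 *   static size_t do_shift(size_t v, unsigned n, int bmi2)
 *   {   return bmi2 ? shift_bmi2(v, n) : shift_default(v, n); }
 */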

/* prefetch
 * can be disabled by declaring the NO_PREFETCH build macro */
#if defined(NO_PREFETCH)
# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
#else
# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86)) /* _mm_prefetch() is not defined outside of x86/x64 */
# include <mmintrin.h> /* https://msdn.microsoft.com/fr-fr/library/84szxsww(v=vs.90).aspx */
# define PREFETCH_L1(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T0)
# define PREFETCH_L2(ptr) _mm_prefetch((const char*)(ptr), _MM_HINT_T1)
# elif defined(__GNUC__) && ( (__GNUC__ >= 4) || ( (__GNUC__ == 3) && (__GNUC_MINOR__ >= 1) ) )
# define PREFETCH_L1(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 3 /* locality */)
# define PREFETCH_L2(ptr) __builtin_prefetch((ptr), 0 /* rw==read */, 2 /* locality */)
# elif defined(__aarch64__)
# define PREFETCH_L1(ptr) __asm__ __volatile__("prfm pldl1keep, %0" ::"Q"(*(ptr)))
# define PREFETCH_L2(ptr) __asm__ __volatile__("prfm pldl2keep, %0" ::"Q"(*(ptr)))
# else
# define PREFETCH_L1(ptr) (void)(ptr) /* disabled */
# define PREFETCH_L2(ptr) (void)(ptr) /* disabled */
# endif
#endif /* NO_PREFETCH */

#define CACHELINE_SIZE 64

#define PREFETCH_AREA(p, s)  {                               \
    const char* const _ptr = (const char*)(p);               \
    size_t const _size = (size_t)(s);                        \
    size_t _pos;                                             \
    for (_pos=0; _pos<_size; _pos+=CACHELINE_SIZE) {         \
        PREFETCH_L2(_ptr + _pos);                            \
    }                                                        \
}
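
/* Illustrative sketch (assumption, not actual zstd code) : prefetch a likely
 * match candidate one iteration ahead, and warm an entire table with
 * PREFETCH_AREA before a loop that will scan it. Names are hypothetical.
 *
 *   PREFETCH_L1(base + nextCandidateIdx);       // one cache line, into L1
 *   PREFETCH_AREA(hashTable, hashTableBytes);   // every cache line, into L2
 */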

/* vectorization
 * older GCC (pre gcc-4.3, picked as the cutoff) uses a different syntax,
 * and some compilers, such as Intel ICC and MCST LCC, do not support it at all. */
#if !defined(__INTEL_COMPILER) && !defined(__clang__) && defined(__GNUC__) && !defined(__LCC__)
# if (__GNUC__ == 4 && __GNUC_MINOR__ > 3) || (__GNUC__ >= 5)
# define DONT_VECTORIZE __attribute__((optimize("no-tree-vectorize")))
# else
# define DONT_VECTORIZE _Pragma("GCC optimize(\"no-tree-vectorize\")")
# endif
#else
# define DONT_VECTORIZE
#endif

/* Tell the compiler that a branch is likely or unlikely.
 * Only use these macros if they cause the compiler to generate better code.
 * If you can remove a LIKELY/UNLIKELY annotation without speed changes in gcc
 * and clang, please do.
 */
#if defined(__GNUC__)
#define LIKELY(x) (__builtin_expect((x), 1))
#define UNLIKELY(x) (__builtin_expect((x), 0))
#else
#define LIKELY(x) (x)
#define UNLIKELY(x) (x)
#endif
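
/* Illustrative sketch (assumption, not actual zstd code) : annotate the rare
 * error path so the compiler keeps the common path on the straight-line layout.
 * The size check and constant below are hypothetical.
 *
 *   if (UNLIKELY(srcSize < 4)) return 0;          // rare : bail out on truncated input
 *   if (LIKELY(matchLength >= 3)) { ... }         // common : take the match path
 */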

#if __has_builtin(__builtin_unreachable) || (defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5)))
# define ZSTD_UNREACHABLE { assert(0), __builtin_unreachable(); }
#else
# define ZSTD_UNREACHABLE { assert(0); }
#endif
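
/* Illustrative sketch (assumption, not actual zstd code) : mark a default case
 * that an enum invariant makes impossible; it asserts in debug builds and tells
 * the optimizer the branch is dead otherwise. Requires an assert() in scope.
 * The enum values below are hypothetical.
 *
 *   switch (mode) {
 *   case mode_compress:   return 1;
 *   case mode_decompress: return 2;
 *   default: ZSTD_UNREACHABLE;
 *   }
 */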

/* disable warnings */
#ifdef _MSC_VER /* Visual Studio */
# include <intrin.h> /* For Visual 2005 */
# pragma warning(disable : 4100) /* disable: C4100: unreferenced formal parameter */
# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
# pragma warning(disable : 4204) /* disable: C4204: non-constant aggregate initializer */
# pragma warning(disable : 4214) /* disable: C4214: non-int bitfields */
# pragma warning(disable : 4324) /* disable: C4324: padded structure */
#endif

/* Like DYNAMIC_BMI2, but for compile-time determination of BMI2 support */
#ifndef STATIC_BMI2
# if defined(_MSC_VER) && (defined(_M_X64) || defined(_M_IX86))
# ifdef __AVX2__ /* MSVC does not have a BMI2-specific flag, but every CPU that supports AVX2 also supports BMI2 */
# define STATIC_BMI2 1
# endif
# elif defined(__BMI2__) && defined(__x86_64__) && defined(__GNUC__)
# define STATIC_BMI2 1
# endif
#endif

#ifndef STATIC_BMI2
  #define STATIC_BMI2 0
#endif
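
/* Illustrative sketch (assumption, not actual zstd code) : when BMI2 support is
 * guaranteed at compile time, the runtime-dispatch flag can be hardwired.
 * detect_bmi2_at_runtime() is a hypothetical CPUID-based check.
 *
 *   int const bmi2 = STATIC_BMI2 ? 1 : detect_bmi2_at_runtime();
 */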

/* compile time determination of SIMD support */
#if !defined(ZSTD_NO_INTRINSICS)
# if defined(__SSE2__) || defined(_M_AMD64) || (defined (_M_IX86) && defined(_M_IX86_FP) && (_M_IX86_FP >= 2))
# define ZSTD_ARCH_X86_SSE2
# endif
# if defined(__ARM_NEON) || defined(_M_ARM64)
# define ZSTD_ARCH_ARM_NEON
# endif
#
# if defined(ZSTD_ARCH_X86_SSE2)
# include <emmintrin.h>
# elif defined(ZSTD_ARCH_ARM_NEON)
# include <arm_neon.h>
# endif
#endif
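
/* Illustrative sketch (assumption, not actual zstd code) : code needing
 * intrinsics keys off these arch macros and keeps a portable scalar fallback.
 *
 *   #if defined(ZSTD_ARCH_X86_SSE2)
 *       __m128i const zero = _mm_setzero_si128();    // SSE2 path
 *   #elif defined(ZSTD_ARCH_ARM_NEON)
 *       uint8x16_t const zero = vdupq_n_u8(0);       // NEON path
 *   #else
 *       unsigned char const zero = 0;                // scalar fallback
 *   #endif
 */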

/* C-language Attributes are added in C23. */
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ > 201710L) && defined(__has_c_attribute)
# define ZSTD_HAS_C_ATTRIBUTE(x) __has_c_attribute(x)
#else
# define ZSTD_HAS_C_ATTRIBUTE(x) 0
#endif

/* Only use C++ attributes in C++. Some compilers report support for C++
 * attributes when compiling with C.
 */
#if defined(__cplusplus) && defined(__has_cpp_attribute)
# define ZSTD_HAS_CPP_ATTRIBUTE(x) __has_cpp_attribute(x)
#else
# define ZSTD_HAS_CPP_ATTRIBUTE(x) 0
#endif

/* Define the ZSTD_FALLTHROUGH macro for annotating a switch case with the 'fallthrough' attribute.
 * - C23: https://en.cppreference.com/w/c/language/attributes/fallthrough
 * - C++17: https://en.cppreference.com/w/cpp/language/attributes/fallthrough
 * - Else: __attribute__((__fallthrough__))
 */
#ifndef ZSTD_FALLTHROUGH
# if ZSTD_HAS_C_ATTRIBUTE(fallthrough)
# define ZSTD_FALLTHROUGH [[fallthrough]]
# elif ZSTD_HAS_CPP_ATTRIBUTE(fallthrough)
# define ZSTD_FALLTHROUGH [[fallthrough]]
# elif __has_attribute(__fallthrough__)
/* Leading semicolon is to satisfy gcc-11 with -pedantic. Without the semicolon
 * gcc complains about: a label can only be part of a statement and a declaration is not a statement.
 */
# define ZSTD_FALLTHROUGH ; __attribute__((__fallthrough__))
# else
# define ZSTD_FALLTHROUGH
# endif
#endif
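
/* Illustrative sketch (assumption, not actual zstd code) : documenting an
 * intentional fall-through silences -Wimplicit-fallthrough and its MSVC/clang
 * equivalents.
 *
 *   switch (nbBytes) {
 *   case 3: dst[2] = src[2]; ZSTD_FALLTHROUGH;
 *   case 2: dst[1] = src[1]; ZSTD_FALLTHROUGH;
 *   case 1: dst[0] = src[0]; ZSTD_FALLTHROUGH;
 *   case 0: break;
 *   }
 */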

/*-**************************************************************
* Alignment check
*****************************************************************/

/* this check was initially hosted in mem.h,
 * but that file is removed (or replaced) for the Linux kernel,
 * so it now lives in compiler.h,
 * which remains valid for both user & kernel spaces.
 */

#ifndef ZSTD_ALIGNOF
# if defined(__GNUC__) || defined(_MSC_VER)
/* covers gcc, clang & MSVC */
/* note : this section must come first, before C11,
 * due to a limitation in the kernel source generator */
# define ZSTD_ALIGNOF(T) __alignof(T)

# elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
/* C11 support */
# include <stdalign.h>
# define ZSTD_ALIGNOF(T) alignof(T)

# else
/* No known support for alignof() - imperfect backup */
# define ZSTD_ALIGNOF(T) (sizeof(void*) < sizeof(T) ? sizeof(void*) : sizeof(T))

# endif
#endif /* ZSTD_ALIGNOF */
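
/* Illustrative sketch (assumption, not actual zstd code) : round a workspace
 * offset up to the alignment a type requires before placing an object there.
 * The names below are hypothetical; the mask trick assumes a power-of-2 alignment.
 *
 *   size_t const align  = ZSTD_ALIGNOF(unsigned long long);
 *   size_t const offset = (usedBytes + align - 1) & ~(align - 1);
 */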

/*-**************************************************************
* Sanitizer
*****************************************************************/

/* Issue #3240 reports an ASAN failure on an llvm-mingw build. Out of an
 * abundance of caution, disable our custom poisoning on mingw. */
#ifdef __MINGW32__
#ifndef ZSTD_ASAN_DONT_POISON_WORKSPACE
#define ZSTD_ASAN_DONT_POISON_WORKSPACE 1
#endif
#ifndef ZSTD_MSAN_DONT_POISON_WORKSPACE
#define ZSTD_MSAN_DONT_POISON_WORKSPACE 1
#endif
#endif

#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support msan provide sanitizer/msan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h> /* size_t */
#define ZSTD_DEPS_NEED_STDINT
#include "zstd_deps.h" /* intptr_t */

/* Make memory region fully initialized (without changing its contents). */
void __msan_unpoison(const volatile void *a, size_t size);

/* Make memory region fully uninitialized (without changing its contents).
 This is a legacy interface that does not update origin information. Use
 __msan_allocated_memory() instead. */
void __msan_poison(const volatile void *a, size_t size);

/* Returns the offset of the first (at least partially) poisoned byte in the
 memory range, or -1 if the whole range is good. */
intptr_t __msan_test_shadow(const volatile void *x, size_t size);

/* Print shadow and origin for the memory range to stderr in a human-readable
 format. */
void __msan_print_shadow(const volatile void *x, size_t size);
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined(ZSTD_ASAN_DONT_POISON_WORKSPACE)
/* Not all platforms that support asan provide sanitizer/asan_interface.h.
 * We therefore declare the functions we need ourselves, rather than trying to
 * include the header file... */
#include <stddef.h> /* size_t */

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
 *
 * This memory must be previously allocated by your program. Instrumented
 * code is forbidden from accessing addresses in this region until it is
 * unpoisoned. This function is not guaranteed to poison the entire region -
 * it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
 * alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can poison or
 * unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_poison_memory_region(void const volatile *addr, size_t size);

/**
 * Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
 *
 * This memory must be previously allocated by your program. Accessing
 * addresses in this region is allowed until this region is poisoned again.
 * This function could unpoison a super-region of <c>[addr, addr+size)</c> due
 * to ASan alignment restrictions.
 *
 * \note This function is not thread-safe because no two threads can
 * poison or unpoison memory in the same memory region simultaneously.
 *
 * \param addr Start of memory region.
 * \param size Size of memory region. */
void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
#endif
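
/* Illustrative sketch (assumption, not actual zstd code) : poison the unused
 * tail of a workspace so stray accesses trip ASan, and unpoison it before the
 * space is handed out again. All names below are hypothetical.
 *
 *   __asan_poison_memory_region(wkspTail, wkspCapacity - wkspUsed);
 *   ...
 *   __asan_unpoison_memory_region(wkspTail, wkspCapacity - wkspUsed);
 */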

#endif /* ZSTD_COMPILER_H */
