/*
 * Copyright 2005 Google Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "rdkafka_int.h"
#include "rdendian.h"



#ifdef __FreeBSD__
# include <sys/endian.h>
#elif defined(__APPLE_CC__) || (defined(__MACH__) && defined(__APPLE__)) /* macOS support */
# include <machine/endian.h>

#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN
# define htole16(x) (x)
# define le32toh(x) (x)
#elif __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
# define htole16(x) __DARWIN_OSSwapInt16(x)
# define le32toh(x) __DARWIN_OSSwapInt32(x)
#else
# error "Endianness is undefined"
#endif


#elif !defined(__WIN32__) && !defined(_MSC_VER) && !defined(__sun) && !defined(_AIX)
# include <endian.h>
#endif

#include <stdlib.h>
#include <assert.h>
#include <string.h>
#include <errno.h>
#include <stdbool.h>
#include <limits.h>
#if !defined(__WIN32__) && !defined(_MSC_VER)
#include <sys/uio.h>
#endif

#ifdef __ANDROID__
#define le32toh letoh32
#endif

#if defined(__WIN32__) && defined(SG)
struct iovec {
        void *iov_base;  /* Pointer to data. */
        size_t iov_len;  /* Length of data. */
};
#endif

#define get_unaligned_memcpy(x) ({ \
        typeof(*(x)) _ret; \
        memcpy(&_ret, (x), sizeof(*(x))); \
        _ret; })
#define put_unaligned_memcpy(v,x) ({ \
        typeof((v)) _v = (v); \
        memcpy((x), &_v, sizeof(*(x))); })

#define get_unaligned_direct(x) (*(x))
#define put_unaligned_direct(v,x) (*(x) = (v))
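
// Illustrative sketch, not from the original source: both accessor flavours
// move a value through a possibly misaligned pointer. The _direct forms
// dereference the pointer and rely on the hardware tolerating misalignment;
// the _memcpy forms go through memcpy(), which is well defined for any
// alignment on every architecture. For example:
//
//   u8 buf[8];
//   put_unaligned_memcpy(0x11223344u, (u32 *)(buf + 1)); // safe anywhere
//   u32 v = get_unaligned_memcpy((u32 *)(buf + 1));      // v == 0x11223344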

// Potentially unaligned loads and stores.
// x86, PowerPC, and ARM64 can simply do these loads and stores natively.
#if defined(__i386__) || defined(__x86_64__) || defined(__powerpc__) || \
    defined(_M_IX86) || defined(_M_X64) || defined(_M_AMD64) || \
    defined(__aarch64__)

#define get_unaligned get_unaligned_direct
#define put_unaligned put_unaligned_direct
#define get_unaligned64 get_unaligned_direct
#define put_unaligned64 put_unaligned_direct

// ARMv7 and newer support native unaligned accesses, but only of 16-bit
// and 32-bit values (not 64-bit); older versions either raise a fatal signal,
// do an unaligned read and rotate the words around a bit, or do the reads very
// slowly (trip through kernel mode). There's no simple #define that says just
// "ARMv7 or higher", so we have to filter away all ARMv4, ARMv5 and ARMv6
// sub-architectures.
//
// This is a mess, but there's not much we can do about it.
#elif defined(__arm__) && \
      !defined(__ARM_ARCH_4__) && \
      !defined(__ARM_ARCH_4T__) && \
      !defined(__ARM_ARCH_5__) && \
      !defined(__ARM_ARCH_5T__) && \
      !defined(__ARM_ARCH_5TE__) && \
      !defined(__ARM_ARCH_5TEJ__) && \
      !defined(__ARM_ARCH_6__) && \
      !defined(__ARM_ARCH_6J__) && \
      !defined(__ARM_ARCH_6K__) && \
      !defined(__ARM_ARCH_6Z__) && \
      !defined(__ARM_ARCH_6ZK__) && \
      !defined(__ARM_ARCH_6T2__)

#define get_unaligned get_unaligned_direct
#define put_unaligned put_unaligned_direct
#define get_unaligned64 get_unaligned_memcpy
#define put_unaligned64 put_unaligned_memcpy

// These macros are provided for architectures that don't support
// unaligned loads and stores.
#else

#define get_unaligned get_unaligned_memcpy
#define put_unaligned put_unaligned_memcpy
#define get_unaligned64 get_unaligned_memcpy
#define put_unaligned64 put_unaligned_memcpy

#endif

#define get_unaligned_le32(x) (le32toh(get_unaligned((u32 *)(x))))
#define put_unaligned_le16(v,x) (put_unaligned(htole16(v), (u16 *)(x)))
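
// Illustrative sketch, not from the original source: the composed helpers
// above read and write little-endian fields at arbitrary (possibly
// unaligned) addresses, independent of the host byte order:
//
//   const u8 *src = ...;                // possibly unaligned input
//   u32 off = get_unaligned_le32(src);  // 32-bit little-endian load
//   u8 hdr[2];
//   put_unaligned_le16((u16)off, hdr);  // 16-bit little-endian store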

typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned u32;
typedef unsigned long long u64;

#ifdef _MSC_VER
#define BUG_ON(x) do { if (unlikely((x))) abort(); } while (0)
#else
#define BUG_ON(x) assert(!(x))
#endif


/* Minimal userspace stand-ins for the Linux kernel helpers used by the
 * ported code. */
#define vmalloc(x) malloc(x)
#define vfree(x) free(x)

#define EXPORT_SYMBOL(x)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))

#ifndef likely
#define likely(x) __builtin_expect((x), 1)
#define unlikely(x) __builtin_expect((x), 0)
#endif

/* Note: the type argument is ignored and the arguments may be evaluated
 * twice, unlike the kernel's min_t()/max_t(). */
#define min_t(t,x,y) ((x) < (y) ? (x) : (y))
#define max_t(t,x,y) ((x) > (y) ? (x) : (y))

#if defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN
#define __LITTLE_ENDIAN__ 1
#endif

#if __LITTLE_ENDIAN__ == 1 || defined(__WIN32__)
#ifndef htole16
#define htole16(x) (x)
#endif
#ifndef le32toh
#define le32toh(x) (x)
#endif
#endif


#if defined(_MSC_VER)
#if BYTE_ORDER == LITTLE_ENDIAN
#define htole16(x) (x)
#define le32toh(x) (x)

#elif BYTE_ORDER == BIG_ENDIAN
/* MSVC has no __builtin_bswap*(); use its byte-swapping intrinsics instead. */
#define htole16(x) _byteswap_ushort(x)
#define le32toh(x) _byteswap_ulong(x)
#endif
#endif

#if defined(__sun)
#ifndef htole16
#define htole16(x) LE_16(x)
#endif
#ifndef le32toh
#define le32toh(x) LE_32(x)
#endif
#endif
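
// Illustrative sketch, not from the original source: whichever branch above
// applies, htole16()/le32toh() end up as identity macros on little-endian
// hosts and as byte swaps on big-endian hosts, so for example:
//
//   u32 raw;
//   memcpy(&raw, "\x04\x03\x02\x01", 4);  // bytes 04 03 02 01 in memory
//   u32 val = le32toh(raw);               // val == 0x01020304 on any host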

#if defined(_MSC_VER)
#define BITS_PER_LONG (32) /* Windows is LLP64: long is 32-bit even on 64-bit targets */
#else
#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
#endif