// Copyright 2005 Google, Inc
//
// Utility functions that depend on bytesex. We define htonll and ntohll,
// as well as "Google" versions of all the standards: ghtonl, ghtons, and
// so on. These functions do exactly the same as their standard variants,
// but don't require including the dangerous netinet/in.h.

#ifndef UTIL_ENDIAN_ENDIAN_H_
#define UTIL_ENDIAN_ENDIAN_H_

#include "base/integral_types.h"
#include "base/logging.h"
#include "base/port.h"
#include "base/int128.h"

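// Swap the byte order of a 64-bit value. For illustration,
//   gbswap_64(0x0102030405060708ULL) == 0x0807060504030201ULL.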
inline uint64 gbswap_64(uint64 host_int) {
#if defined(COMPILER_GCC3) && defined(__x86_64__)
  // Adapted from /usr/include/byteswap.h.
  if (__builtin_constant_p(host_int)) {
    return __bswap_constant_64(host_int);
  } else {
    register uint64 result;
    __asm__ ("bswap %0" : "=r" (result) : "0" (host_int));
    return result;
  }
#elif defined(bswap_64)
  return bswap_64(host_int);
#else
  return static_cast<uint64>(bswap_32(static_cast<uint32>(host_int >> 32))) |
         (static_cast<uint64>(bswap_32(static_cast<uint32>(host_int))) << 32);
#endif  // bswap_64
}

#ifdef IS_LITTLE_ENDIAN

// Definitions for ntohl etc. that don't require us to include
// netinet/in.h. We wrap bswap_32 and bswap_16 in functions rather
// than just #defining them because in debug mode, gcc doesn't
// correctly handle the (rather involved) definitions of bswap_32.
// gcc guarantees that inline functions are as fast as macros, so
// this isn't a performance hit.
inline uint16 ghtons(uint16 x) { return bswap_16(x); }
inline uint32 ghtonl(uint32 x) { return bswap_32(x); }
inline uint64 ghtonll(uint64 x) { return gbswap_64(x); }
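// For illustration, on this little-endian host ghtonl(0x12345678) returns
// 0x78563412, whose in-memory bytes are 0x12 0x34 0x56 0x78, i.e. network
// (big-endian) order.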

#elif defined IS_BIG_ENDIAN

// These definitions are a lot simpler on big-endian machines.
#define ghtons(x) (x)
#define ghtonl(x) (x)
#define ghtonll(x) (x)

#else
#error "Unsupported bytesex: Either IS_BIG_ENDIAN or IS_LITTLE_ENDIAN must be defined"
#endif  // bytesex

// Utilities to convert numbers between the host's native byte order and
// little-endian byte order (the opposite of network byte order).
//
// Convert x from host order to little-endian:  x = LittleEndian::FromHost32(x);
// Convert x from little-endian to host order:  x = LittleEndian::ToHost32(x);
// (16- and 64-bit variants are named FromHost16/ToHost16, FromHost64/ToHost64.)
//
// Store a value into unaligned memory, converting it to little-endian order:
//   LittleEndian::Store16(p, x);
//
// Load an unaligned value stored in little-endian order, converting it to
// host order:
//   x = LittleEndian::Load16(p);
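//
// For illustration (independent of host byte order): after
//   char buf[4];
//   LittleEndian::Store32(buf, 0x12345678);
// buf holds the bytes 0x78 0x56 0x34 0x12, and
//   LittleEndian::Load32(buf) == 0x12345678.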
class LittleEndian {
 public:
  // Conversion functions.
#ifdef IS_LITTLE_ENDIAN

  static uint16 FromHost16(uint16 x) { return x; }
  static uint16 ToHost16(uint16 x) { return x; }

  static uint32 FromHost32(uint32 x) { return x; }
  static uint32 ToHost32(uint32 x) { return x; }

  static uint64 FromHost64(uint64 x) { return x; }
  static uint64 ToHost64(uint64 x) { return x; }

  static bool IsLittleEndian() { return true; }

#elif defined IS_BIG_ENDIAN

  static uint16 FromHost16(uint16 x) { return bswap_16(x); }
  static uint16 ToHost16(uint16 x) { return bswap_16(x); }

  static uint32 FromHost32(uint32 x) { return bswap_32(x); }
  static uint32 ToHost32(uint32 x) { return bswap_32(x); }

  static uint64 FromHost64(uint64 x) { return gbswap_64(x); }
  static uint64 ToHost64(uint64 x) { return gbswap_64(x); }

  static bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

  // Functions to do unaligned loads and stores in little-endian order.
  static uint16 Load16(const void *p) {
    return ToHost16(UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16 v) {
    UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32 Load32(const void *p) {
    return ToHost32(UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32 v) {
    UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64 Load64(const void *p) {
    return ToHost64(UNALIGNED_LOAD64(p));
  }

  // Build a uint64 from 1-8 bytes.
  // The 8 * len least significant bits are loaded from the memory in
  // little-endian order. The 64 - 8 * len most significant bits are
  // all set to 0.
  // In LaTeX-friendly terms, this function returns:
  //   $\sum_{i=0}^{len-1} p[i] 256^{i}$, where p[i] is unsigned.
  //
  // This function is equivalent to:
  //   uint64 val = 0;
  //   memcpy(&val, p, len);
  //   return ToHost64(val);
  // TODO(user): write a small benchmark and benchmark the speed
  // of a memcpy based approach.
  //
  // For speed reasons this function does not work for len == 0.
  // The caller needs to guarantee that 1 <= len <= 8.
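  //
  // For illustration, if p points at the three bytes {0x01, 0x02, 0x03},
  // then Load64VariableLength(p, 3) == 0x030201.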
  static uint64 Load64VariableLength(const void * const p, int len) {
    DCHECK_GE(len, 1);
    DCHECK_LE(len, 8);
    // Read through unsigned bytes: with a plain (possibly signed) char, a
    // byte with its high bit set would be sign-extended and corrupt the
    // high bits of val.
    const uint8 * const buf = static_cast<const uint8 *>(p);
    uint64 val = 0;
    --len;
    do {
      val = (val << 8) | buf[len];
      // (--len >= 0) is about 10 % faster than (len--) in some benchmarks.
    } while (--len >= 0);
    // No ToHost64(...) needed: the bytes are assembled in little-endian
    // order explicitly, regardless of the host architecture.
    return val;
  }

  static void Store64(void *p, uint64 v) {
    UNALIGNED_STORE64(p, FromHost64(v));
  }

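  // The 128-bit loads and stores below treat memory as two 64-bit halves in
  // little-endian order: the low 64 bits of the uint128 live at p and the
  // high 64 bits at p + 8 bytes.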
  static uint128 Load128(const void *p) {
    return uint128(
        ToHost64(UNALIGNED_LOAD64(reinterpret_cast<const uint64 *>(p) + 1)),
        ToHost64(UNALIGNED_LOAD64(p)));
  }

  static void Store128(void *p, const uint128 v) {
    UNALIGNED_STORE64(p, FromHost64(Uint128Low64(v)));
    UNALIGNED_STORE64(reinterpret_cast<uint64 *>(p) + 1,
                      FromHost64(Uint128High64(v)));
  }

  // Build a uint128 from 1-16 bytes.
  // The 8 * len least significant bits are loaded from the memory in
  // little-endian order. The 128 - 8 * len most significant bits are
  // all set to 0. The caller needs to guarantee that 1 <= len <= 16.
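  //
  // For illustration, if p points at the nine bytes 0x01 through 0x09, then
  // Load128VariableLength(p, 9) == uint128(0x09, 0x0807060504030201).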
  static uint128 Load128VariableLength(const void *p, int len) {
    if (len <= 8) {
      return uint128(Load64VariableLength(p, len));
    } else {
      return uint128(
          Load64VariableLength(static_cast<const char *>(p) + 8, len - 8),
          Load64(p));
    }
  }
};

// Defining htonll ourselves is safe, since it's an extension rather than
// one of the standard names provided by netinet/in.h.
#define htonll(x) ghtonll(x)

// ntoh* and hton* are the same thing for any size and bytesex,
// since the function is an involution, i.e., its own inverse.
#define gntohl(x) ghtonl(x)
#define gntohs(x) ghtons(x)
#define gntohll(x) ghtonll(x)
#define ntohll(x) htonll(x)
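// For illustration, the involution means a round trip is the identity:
//   gntohl(ghtonl(x)) == x for any uint32 x, on either byte order.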

#endif  // UTIL_ENDIAN_ENDIAN_H_