// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef GOOGLE_PROTOBUF_STUBS_PORT_H_
#define GOOGLE_PROTOBUF_STUBS_PORT_H_

#include <assert.h>
#include <cstdint>
#include <stdlib.h>
#include <cstddef>
#include <string>
#include <string.h>

#include <google/protobuf/stubs/platform_macros.h>

#include <google/protobuf/port_def.inc>

#undef PROTOBUF_LITTLE_ENDIAN
#ifdef _WIN32
// Assume Windows is always little-endian.
// TODO(xiaofeng): PROTOBUF_LITTLE_ENDIAN is not only used for
// optimization but also for correctness. We should define a
// different macro to test the big-endian code path in coded_stream.
#if !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
// If MSVC has "/RTCc" set, it will complain about truncating casts at
// runtime.  This file contains some intentional truncating casts.
#pragma runtime_checks("c", off)
#endif
#else
#ifdef __APPLE__
#include <machine/endian.h>  // __BYTE_ORDER
#elif defined(__FreeBSD__)
#include <sys/endian.h>  // __BYTE_ORDER
#elif (defined(sun) || defined(__sun)) && (defined(__SVR4) || defined(__svr4__))
#include <sys/isa_defs.h>  // __BYTE_ORDER
#elif defined(_AIX) || defined(__TOS_AIX__)
#include <sys/machine.h>  // BYTE_ORDER
#else
#if !defined(__QNX__)
#include <endian.h>  // __BYTE_ORDER
#endif
#endif
#if ((defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)) ||   \
     (defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN) || \
     (defined(BYTE_ORDER) && BYTE_ORDER == LITTLE_ENDIAN)) &&      \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
#define PROTOBUF_LITTLE_ENDIAN 1
#endif
#endif
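// Illustrative sketch, not part of this header's API: consumers should test
// the macro with #ifdef (it is either defined to 1 or left undefined, never
// defined to 0), e.g.
//
//   #ifdef PROTOBUF_LITTLE_ENDIAN
//     // Host byte order already matches the little-endian wire format.
//   #else
//     // Big-endian (or unknown) host: byte-swap when touching wire data.
//   #endif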

// These #includes are for the byte swap functions declared later on.
#ifdef _MSC_VER
#include <stdlib.h>  // NOLINT(build/include)
#include <intrin.h>
#elif defined(__APPLE__)
#include <libkern/OSByteOrder.h>
#elif defined(__linux__) || defined(__ANDROID__) || defined(__CYGWIN__)
#include <byteswap.h>  // IWYU pragma: export
#endif

// Legacy: some users reference these (internal-only) macros even though we
// don't need them any more.
#if defined(_MSC_VER) && defined(PROTOBUF_USE_DLLS)
#ifdef LIBPROTOBUF_EXPORTS
#define LIBPROTOBUF_EXPORT __declspec(dllexport)
#else
#define LIBPROTOBUF_EXPORT __declspec(dllimport)
#endif
#ifdef LIBPROTOC_EXPORTS
#define LIBPROTOC_EXPORT __declspec(dllexport)
#else
#define LIBPROTOC_EXPORT __declspec(dllimport)
#endif
#else
#define LIBPROTOBUF_EXPORT
#define LIBPROTOC_EXPORT
#endif

#define PROTOBUF_RUNTIME_DEPRECATED(message) PROTOBUF_DEPRECATED_MSG(message)
#define GOOGLE_PROTOBUF_RUNTIME_DEPRECATED(message) \
  PROTOBUF_DEPRECATED_MSG(message)
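// Illustrative sketch only (hypothetical declaration, not part of protobuf):
// the macros are placed on a declaration so callers get a compile-time
// warning, assuming PROTOBUF_DEPRECATED_MSG expands to the compiler's
// deprecation attribute:
//
//   PROTOBUF_RUNTIME_DEPRECATED("Use NewApi() instead.")
//   void OldApi();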

// ===================================================================
// from google3/base/port.h

#if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L || \
     (defined(_MSC_VER) && _MSC_VER >= 1900))
// Define this to 1 if the code is compiled in C++11 mode; leave it
// undefined otherwise.  Do NOT define it to 0 -- that causes
// '#ifdef LANG_CXX11' to behave differently from '#if LANG_CXX11'.
#define LANG_CXX11 1
#else
#error "Protobuf requires at least C++11."
#endif

namespace google {
namespace protobuf {

using ConstStringParam = const std::string &;

typedef unsigned int uint;

typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;

typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;

static const int32 kint32max = 0x7FFFFFFF;
static const int32 kint32min = -kint32max - 1;
static const int64 kint64max = int64_t{0x7FFFFFFFFFFFFFFF};
static const int64 kint64min = -kint64max - 1;
static const uint32 kuint32max = 0xFFFFFFFFu;
static const uint64 kuint64max = uint64_t{0xFFFFFFFFFFFFFFFFu};

#if defined(ADDRESS_SANITIZER) || defined(THREAD_SANITIZER) || \
    defined(MEMORY_SANITIZER)

#ifdef __cplusplus
extern "C" {
#endif  // __cplusplus
uint16_t __sanitizer_unaligned_load16(const void *p);
uint32_t __sanitizer_unaligned_load32(const void *p);
uint64_t __sanitizer_unaligned_load64(const void *p);
void __sanitizer_unaligned_store16(void *p, uint16_t v);
void __sanitizer_unaligned_store32(void *p, uint32_t v);
void __sanitizer_unaligned_store64(void *p, uint64_t v);
#ifdef __cplusplus
}  // extern "C"
#endif  // __cplusplus

inline uint16_t GOOGLE_UNALIGNED_LOAD16(const void *p) {
  return __sanitizer_unaligned_load16(p);
}

inline uint32_t GOOGLE_UNALIGNED_LOAD32(const void *p) {
  return __sanitizer_unaligned_load32(p);
}

inline uint64_t GOOGLE_UNALIGNED_LOAD64(const void *p) {
  return __sanitizer_unaligned_load64(p);
}

inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16_t v) {
  __sanitizer_unaligned_store16(p, v);
}

inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32_t v) {
  __sanitizer_unaligned_store32(p, v);
}

inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64_t v) {
  __sanitizer_unaligned_store64(p, v);
}

#elif defined(GOOGLE_PROTOBUF_USE_UNALIGNED) && GOOGLE_PROTOBUF_USE_UNALIGNED

#define GOOGLE_UNALIGNED_LOAD16(_p) (*reinterpret_cast<const uint16_t *>(_p))
#define GOOGLE_UNALIGNED_LOAD32(_p) (*reinterpret_cast<const uint32_t *>(_p))
#define GOOGLE_UNALIGNED_LOAD64(_p) (*reinterpret_cast<const uint64_t *>(_p))

#define GOOGLE_UNALIGNED_STORE16(_p, _val) (*reinterpret_cast<uint16_t *>(_p) = (_val))
#define GOOGLE_UNALIGNED_STORE32(_p, _val) (*reinterpret_cast<uint32_t *>(_p) = (_val))
#define GOOGLE_UNALIGNED_STORE64(_p, _val) (*reinterpret_cast<uint64_t *>(_p) = (_val))

#else
inline uint16_t GOOGLE_UNALIGNED_LOAD16(const void *p) {
  uint16_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint32_t GOOGLE_UNALIGNED_LOAD32(const void *p) {
  uint32_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline uint64_t GOOGLE_UNALIGNED_LOAD64(const void *p) {
  uint64_t t;
  memcpy(&t, p, sizeof t);
  return t;
}

inline void GOOGLE_UNALIGNED_STORE16(void *p, uint16_t v) {
  memcpy(p, &v, sizeof v);
}

inline void GOOGLE_UNALIGNED_STORE32(void *p, uint32_t v) {
  memcpy(p, &v, sizeof v);
}

inline void GOOGLE_UNALIGNED_STORE64(void *p, uint64_t v) {
  memcpy(p, &v, sizeof v);
}
#endif
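// Illustrative sketch (assumed caller code, not part of this header):
// whichever branch above was selected, the helpers share one contract --
// load or store an integer at a possibly misaligned address without
// undefined behavior:
//
//   char buf[8];                                     // no alignment guarantee
//   GOOGLE_UNALIGNED_STORE32(buf + 1, 0x12345678u);  // misaligned write
//   uint32_t v = GOOGLE_UNALIGNED_LOAD32(buf + 1);   // v == 0x12345678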

#if defined(GOOGLE_PROTOBUF_OS_NACL) \
    || (defined(__ANDROID__) && defined(__clang__) \
        && (__clang_major__ == 3 && __clang_minor__ == 8) \
        && (__clang_patchlevel__ < 275480))
# define GOOGLE_PROTOBUF_USE_PORTABLE_LOG2
#endif

// The following guarantees declaration of the byte swap functions.
#ifdef _MSC_VER
#define bswap_16(x) _byteswap_ushort(x)
#define bswap_32(x) _byteswap_ulong(x)
#define bswap_64(x) _byteswap_uint64(x)

#elif defined(__APPLE__)
// Mac OS X / Darwin features
#define bswap_16(x) OSSwapInt16(x)
#define bswap_32(x) OSSwapInt32(x)
#define bswap_64(x) OSSwapInt64(x)

#elif !defined(__linux__) && !defined(__ANDROID__) && !defined(__CYGWIN__)

#ifndef bswap_16
static inline uint16_t bswap_16(uint16_t x) {
  return static_cast<uint16_t>(((x & 0xFF) << 8) | ((x & 0xFF00) >> 8));
}
#define bswap_16(x) bswap_16(x)
#endif

#ifndef bswap_32
static inline uint32_t bswap_32(uint32_t x) {
  return (((x & 0xFF) << 24) |
          ((x & 0xFF00) << 8) |
          ((x & 0xFF0000) >> 8) |
          ((x & 0xFF000000) >> 24));
}
#define bswap_32(x) bswap_32(x)
#endif

#ifndef bswap_64
static inline uint64_t bswap_64(uint64_t x) {
  return (((x & uint64_t{0xFFu}) << 56) | ((x & uint64_t{0xFF00u}) << 40) |
          ((x & uint64_t{0xFF0000u}) << 24) |
          ((x & uint64_t{0xFF000000u}) << 8) |
          ((x & uint64_t{0xFF00000000u}) >> 8) |
          ((x & uint64_t{0xFF0000000000u}) >> 24) |
          ((x & uint64_t{0xFF000000000000u}) >> 40) |
          ((x & uint64_t{0xFF00000000000000u}) >> 56));
}
#define bswap_64(x) bswap_64(x)
#endif

#endif
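// Illustrative examples: whichever definitions are in effect, the bswap_*
// macros reverse byte order, e.g.
//
//   bswap_16(0xABCDu)     == 0xCDABu
//   bswap_32(0x01020304u) == 0x04030201u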

// ===================================================================
// from google3/util/bits/bits.h

class Bits {
 public:
  static uint32_t Log2FloorNonZero(uint32_t n) {
#if defined(__GNUC__)
    return 31 ^ static_cast<uint32_t>(__builtin_clz(n));
#elif defined(_MSC_VER)
    unsigned long where;
    _BitScanReverse(&where, n);
    return where;
#else
    return Log2FloorNonZero_Portable(n);
#endif
  }

  static uint32_t Log2FloorNonZero64(uint64_t n) {
    // Older versions of clang run into an instruction-selection failure when
    // they encounter __builtin_clzll:
    //   https://bugs.chromium.org/p/nativeclient/issues/detail?id=4395
    // This includes arm-nacl-clang and clang in older Android NDK versions.
    // To work around this, we fall back to the portable implementation when
    // building with those compilers.
#if defined(__GNUC__) && !defined(GOOGLE_PROTOBUF_USE_PORTABLE_LOG2)
    return 63 ^ static_cast<uint32_t>(__builtin_clzll(n));
#elif defined(_MSC_VER) && defined(_M_X64)
    unsigned long where;
    _BitScanReverse64(&where, n);
    return where;
#else
    return Log2FloorNonZero64_Portable(n);
#endif
  }

 private:
  static int Log2FloorNonZero_Portable(uint32_t n) {
    if (n == 0)
      return -1;
    int log = 0;
    uint32_t value = n;
    for (int i = 4; i >= 0; --i) {
      int shift = (1 << i);
      uint32_t x = value >> shift;
      if (x != 0) {
        value = x;
        log += shift;
      }
    }
    assert(value == 1);
    return log;
  }

  static int Log2FloorNonZero64_Portable(uint64_t n) {
    const uint32_t topbits = static_cast<uint32_t>(n >> 32);
    if (topbits == 0) {
      // Top bits are zero, so scan in bottom bits
      return static_cast<int>(Log2FloorNonZero(static_cast<uint32_t>(n)));
    } else {
      return 32 + static_cast<int>(Log2FloorNonZero(topbits));
    }
  }
};
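// Illustrative usage sketch (not part of the class): Log2FloorNonZero returns
// the index of the highest set bit, i.e. floor(log2(n)) for n > 0:
//
//   Bits::Log2FloorNonZero(1) == 0
//   Bits::Log2FloorNonZero(1000) == 9                  // 512 <= 1000 < 1024
//   Bits::Log2FloorNonZero64(uint64_t{1} << 40) == 40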

// ===================================================================
// from google3/util/endian/endian.h
PROTOBUF_EXPORT uint32_t ghtonl(uint32_t x);

class BigEndian {
 public:
#ifdef PROTOBUF_LITTLE_ENDIAN

  static uint16_t FromHost16(uint16_t x) { return bswap_16(x); }
  static uint16_t ToHost16(uint16_t x) { return bswap_16(x); }

  static uint32_t FromHost32(uint32_t x) { return bswap_32(x); }
  static uint32_t ToHost32(uint32_t x) { return bswap_32(x); }

  static uint64_t FromHost64(uint64_t x) { return bswap_64(x); }
  static uint64_t ToHost64(uint64_t x) { return bswap_64(x); }

  static bool IsLittleEndian() { return true; }

#else

  static uint16_t FromHost16(uint16_t x) { return x; }
  static uint16_t ToHost16(uint16_t x) { return x; }

  static uint32_t FromHost32(uint32_t x) { return x; }
  static uint32_t ToHost32(uint32_t x) { return x; }

  static uint64_t FromHost64(uint64_t x) { return x; }
  static uint64_t ToHost64(uint64_t x) { return x; }

  static bool IsLittleEndian() { return false; }

#endif /* ENDIAN */

  // Functions to do unaligned loads and stores in big-endian order.
  static uint16_t Load16(const void *p) {
    return ToHost16(GOOGLE_UNALIGNED_LOAD16(p));
  }

  static void Store16(void *p, uint16_t v) {
    GOOGLE_UNALIGNED_STORE16(p, FromHost16(v));
  }

  static uint32_t Load32(const void *p) {
    return ToHost32(GOOGLE_UNALIGNED_LOAD32(p));
  }

  static void Store32(void *p, uint32_t v) {
    GOOGLE_UNALIGNED_STORE32(p, FromHost32(v));
  }

  static uint64_t Load64(const void *p) {
    return ToHost64(GOOGLE_UNALIGNED_LOAD64(p));
  }

  static void Store64(void *p, uint64_t v) {
    GOOGLE_UNALIGNED_STORE64(p, FromHost64(v));
  }
};
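// Illustrative sketch (assumed caller code, not shipped here): BigEndian
// produces a fixed big-endian byte layout regardless of host endianness,
// which is what network-order framing needs:
//
//   char buf[4];
//   BigEndian::Store32(buf, 0x0A0B0C0Du);  // bytes: 0x0A 0x0B 0x0C 0x0D
//   uint32_t v = BigEndian::Load32(buf);   // v == 0x0A0B0C0D on any host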

}  // namespace protobuf
}  // namespace google

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_STUBS_PORT_H_