// basisu.h
// Copyright (C) 2019-2021 Binomial LLC. All Rights Reserved.
// Important: If compiling with gcc, be sure strict aliasing is disabled: -fno-strict-aliasing
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#ifdef _MSC_VER

    #pragma warning (disable : 4201)
    #pragma warning (disable : 4127) // warning C4127: conditional expression is constant
    #pragma warning (disable : 4530) // C++ exception handler used, but unwind semantics are not enabled.

    // Slamming this off always for v1.16 because we've gotten rid of most std containers.
    #ifndef BASISU_NO_ITERATOR_DEBUG_LEVEL
        #define BASISU_NO_ITERATOR_DEBUG_LEVEL (1)
    #endif

    #ifndef BASISU_NO_ITERATOR_DEBUG_LEVEL
        //#define _HAS_ITERATOR_DEBUGGING 0

        #if defined(_DEBUG) || defined(DEBUG)
            // This is madness, but we need to disable iterator debugging in debug builds or the encoder is unusable because MSVC's iterator debugging implementation is totally broken.
            #ifndef _ITERATOR_DEBUG_LEVEL
                #define _ITERATOR_DEBUG_LEVEL 1
            #endif
            #ifndef _SECURE_SCL
                #define _SECURE_SCL 1
            #endif
        #else // defined(_DEBUG) || defined(DEBUG)
            #ifndef _SECURE_SCL
                #define _SECURE_SCL 0
            #endif
            #ifndef _ITERATOR_DEBUG_LEVEL
                #define _ITERATOR_DEBUG_LEVEL 0
            #endif
        #endif // defined(_DEBUG) || defined(DEBUG)

    #endif // BASISU_NO_ITERATOR_DEBUG_LEVEL

#endif // _MSC_VER

#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <stdarg.h>
#include <string.h>
#include <memory.h>
#include <limits.h>
#include <stdint.h>

#include <algorithm>
#include <limits>
#include <functional>
#include <iterator>
#include <type_traits>
#include <assert.h>
#include <random>

#include "basisu_containers.h"

#ifdef max
#undef max
#endif

#ifdef min
#undef min
#endif

#ifdef _WIN32
#define strcasecmp _stricmp
#endif

// Set to one to enable debug printf()'s when any errors occur, for development/debugging. Especially useful for WebGL development.
#ifndef BASISU_FORCE_DEVEL_MESSAGES
#define BASISU_FORCE_DEVEL_MESSAGES 0
#endif

#define BASISU_NOTE_UNUSED(x) (void)(x)
#define BASISU_ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#define BASISU_NO_EQUALS_OR_COPY_CONSTRUCT(x) x(const x &) = delete; x& operator= (const x &) = delete;
#define BASISU_ASSUME(x) static_assert(x, #x);
#define BASISU_OFFSETOF(s, m) offsetof(s, m)
#define BASISU_STRINGIZE(x) #x
#define BASISU_STRINGIZE2(x) BASISU_STRINGIZE(x)

#if BASISU_FORCE_DEVEL_MESSAGES
#define BASISU_DEVEL_ERROR(...) do { basisu::debug_printf(__VA_ARGS__); } while(0)
#else
#define BASISU_DEVEL_ERROR(...)
#endif

namespace basisu
{
    // Types/utilities

#ifdef _WIN32
    const char BASISU_PATH_SEPERATOR_CHAR = '\\';
#else
    const char BASISU_PATH_SEPERATOR_CHAR = '/';
#endif

    typedef basisu::vector<uint8_t> uint8_vec;
    typedef basisu::vector<int16_t> int16_vec;
    typedef basisu::vector<uint16_t> uint16_vec;
    typedef basisu::vector<uint32_t> uint_vec;
    typedef basisu::vector<uint64_t> uint64_vec;
    typedef basisu::vector<int> int_vec;
    typedef basisu::vector<bool> bool_vec;

    void enable_debug_printf(bool enabled);
    void debug_printf(const char *pFmt, ...);

    template <typename T> inline void clear_obj(T& obj) { memset(&obj, 0, sizeof(obj)); }

    template <typename T0, typename T1> inline T0 lerp(T0 a, T0 b, T1 c) { return a + (b - a) * c; }

    template <typename S> inline S maximum(S a, S b) { return (a > b) ? a : b; }
    template <typename S> inline S maximum(S a, S b, S c) { return maximum(maximum(a, b), c); }
    template <typename S> inline S maximum(S a, S b, S c, S d) { return maximum(maximum(maximum(a, b), c), d); }

    template <typename S> inline S minimum(S a, S b) { return (a < b) ? a : b; }
    template <typename S> inline S minimum(S a, S b, S c) { return minimum(minimum(a, b), c); }
    template <typename S> inline S minimum(S a, S b, S c, S d) { return minimum(minimum(minimum(a, b), c), d); }

    inline float clampf(float value, float low, float high) { if (value < low) value = low; else if (value > high) value = high; return value; }
    inline float saturate(float value) { return clampf(value, 0, 1.0f); }
    inline uint8_t minimumub(uint8_t a, uint8_t b) { return (a < b) ? a : b; }
    inline uint32_t minimumu(uint32_t a, uint32_t b) { return (a < b) ? a : b; }
    inline int32_t minimumi(int32_t a, int32_t b) { return (a < b) ? a : b; }
    inline float minimumf(float a, float b) { return (a < b) ? a : b; }
    inline uint8_t maximumub(uint8_t a, uint8_t b) { return (a > b) ? a : b; }
    inline uint32_t maximumu(uint32_t a, uint32_t b) { return (a > b) ? a : b; }
    inline int32_t maximumi(int32_t a, int32_t b) { return (a > b) ? a : b; }
    inline float maximumf(float a, float b) { return (a > b) ? a : b; }
    inline int squarei(int i) { return i * i; }
    inline float squaref(float i) { return i * i; }
    template<typename T> inline T square(T a) { return a * a; }

    template <typename S> inline S clamp(S value, S low, S high) { return (value < low) ? low : ((value > high) ? high : value); }

    inline uint32_t iabs(int32_t i) { return (i < 0) ? static_cast<uint32_t>(-i) : static_cast<uint32_t>(i); }
    inline uint64_t iabs64(int64_t i) { return (i < 0) ? static_cast<uint64_t>(-i) : static_cast<uint64_t>(i); }

    template<typename T> inline void clear_vector(T &vec) { vec.erase(vec.begin(), vec.end()); }
    template<typename T> inline typename T::value_type *enlarge_vector(T &vec, size_t n) { size_t cs = vec.size(); vec.resize(cs + n); return &vec[cs]; }

    inline bool is_pow2(uint32_t x) { return x && ((x & (x - 1U)) == 0U); }
    inline bool is_pow2(uint64_t x) { return x && ((x & (x - 1U)) == 0U); }

    template<typename T> inline T open_range_check(T v, T minv, T maxv) { assert(v >= minv && v < maxv); BASISU_NOTE_UNUSED(minv); BASISU_NOTE_UNUSED(maxv); return v; }
    template<typename T> inline T open_range_check(T v, T maxv) { assert(v < maxv); BASISU_NOTE_UNUSED(maxv); return v; }

    inline uint32_t total_bits(uint32_t v) { uint32_t l = 0; for ( ; v > 0U; ++l) v >>= 1; return l; }
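    // total_bits() returns the number of bits needed to represent v.
    // For example: total_bits(0) == 0, total_bits(1) == 1, total_bits(255) == 8, total_bits(256) == 9.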

    template<typename T> inline T saturate(T val) { return clamp(val, 0.0f, 1.0f); }

    template<typename T, typename R> inline void append_vector(T &vec, const R *pObjs, size_t n)
    {
        if (n)
        {
            if (vec.size())
            {
                assert((pObjs + n) <= vec.begin() || (pObjs >= vec.end()));
            }
            const size_t cur_s = vec.size();
            vec.resize(cur_s + n);
            memcpy(&vec[cur_s], pObjs, sizeof(R) * n);
        }
    }

    template<typename T> inline void append_vector(T &vec, const T &other_vec)
    {
        assert(&vec != &other_vec);
        if (other_vec.size())
            append_vector(vec, &other_vec[0], other_vec.size());
    }
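
    // Usage sketch (hypothetical values): appends the contents of one vector onto another.
    //
    //   uint8_vec a, b;
    //   a.push_back(1);
    //   b.push_back(2); b.push_back(3);
    //   append_vector(a, b);          // a now holds { 1, 2, 3 }
    //   append_vector(a, &b[0], 1);   // a now holds { 1, 2, 3, 2 }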

    template<typename T> inline void vector_ensure_element_is_valid(T &vec, size_t idx)
    {
        if (idx >= vec.size())
            vec.resize(idx + 1);
    }

    template<typename T> inline void vector_sort(T &vec)
    {
        if (vec.size())
            std::sort(vec.begin(), vec.end());
    }

    template<typename T, typename U> inline bool unordered_set_contains(T& set, const U& obj)
    {
        return set.find(obj) != set.end();
    }

    template<typename T> int vector_find(const T &vec, const typename T::value_type &obj)
    {
        assert(vec.size() <= INT_MAX);
        for (size_t i = 0; i < vec.size(); i++)
            if (vec[i] == obj)
                return static_cast<int>(i);
        return -1;
    }

    template<typename T> void vector_set_all(T &vec, const typename T::value_type &obj)
    {
        for (size_t i = 0; i < vec.size(); i++)
            vec[i] = obj;
    }

    inline uint64_t read_be64(const void *p)
    {
        uint64_t val = 0;
        for (uint32_t i = 0; i < 8; i++)
            val |= (static_cast<uint64_t>(static_cast<const uint8_t *>(p)[7 - i]) << (i * 8));
        return val;
    }

    inline void write_be64(void *p, uint64_t x)
    {
        for (uint32_t i = 0; i < 8; i++)
            static_cast<uint8_t *>(p)[7 - i] = static_cast<uint8_t>(x >> (i * 8));
    }
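
    // Round-trip example: the most significant byte is stored first (big endian).
    //
    //   uint8_t buf[8];
    //   write_be64(buf, 0x0102030405060708ULL); // buf = { 0x01, 0x02, ..., 0x08 }
    //   uint64_t v = read_be64(buf);            // v == 0x0102030405060708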

    static inline uint16_t byteswap16(uint16_t x) { return static_cast<uint16_t>((x << 8) | (x >> 8)); }
    static inline uint32_t byteswap32(uint32_t x) { return ((x << 24) | ((x << 8) & 0x00FF0000) | ((x >> 8) & 0x0000FF00) | (x >> 24)); }

    inline uint32_t floor_log2i(uint32_t v)
    {
        uint32_t b = 0;
        for (; v > 1U; ++b)
            v >>= 1;
        return b;
    }

    inline uint32_t ceil_log2i(uint32_t v)
    {
        uint32_t b = floor_log2i(v);
        if ((b != 32) && (v > (1U << b)))
            ++b;
        return b;
    }
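
    // For example: floor_log2i(16) == 4, floor_log2i(17) == 4, ceil_log2i(16) == 4, ceil_log2i(17) == 5.
    // Both return 0 for inputs 0 and 1.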

    inline int posmod(int x, int y)
    {
        if (x >= 0)
            return (x < y) ? x : (x % y);
        int m = (-x) % y;
        return (m != 0) ? (y - m) : m;
    }
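
    // Unlike the C % operator, the result is always in [0, y) for y > 0.
    // For example: posmod(7, 5) == 2, posmod(-3, 5) == 2, posmod(-10, 5) == 0.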

    inline bool do_excl_ranges_overlap(int la, int ha, int lb, int hb)
    {
        assert(la < ha && lb < hb);
        if ((ha <= lb) || (la >= hb)) return false;
        return true;
    }

    static inline uint32_t read_le_dword(const uint8_t *pBytes)
    {
        return (pBytes[3] << 24U) | (pBytes[2] << 16U) | (pBytes[1] << 8U) | (pBytes[0]);
    }

    static inline void write_le_dword(uint8_t* pBytes, uint32_t val)
    {
        pBytes[0] = (uint8_t)val;
        pBytes[1] = (uint8_t)(val >> 8U);
        pBytes[2] = (uint8_t)(val >> 16U);
        pBytes[3] = (uint8_t)(val >> 24U);
    }

    // Always little endian 1-8 byte unsigned int
    template<uint32_t NumBytes>
    struct packed_uint
    {
        uint8_t m_bytes[NumBytes];

        inline packed_uint() { static_assert(NumBytes <= sizeof(uint64_t), "Invalid NumBytes"); }
        inline packed_uint(uint64_t v) { *this = v; }
        inline packed_uint(const packed_uint& other) { *this = other; }

        inline packed_uint& operator= (uint64_t v)
        {
            for (uint32_t i = 0; i < NumBytes; i++)
                m_bytes[i] = static_cast<uint8_t>(v >> (i * 8));
            return *this;
        }

        inline packed_uint& operator= (const packed_uint& rhs)
        {
            memcpy(m_bytes, rhs.m_bytes, sizeof(m_bytes));
            return *this;
        }

        inline operator uint32_t() const
        {
            switch (NumBytes)
            {
            case 1:
            {
                return m_bytes[0];
            }
            case 2:
            {
                return (m_bytes[1] << 8U) | m_bytes[0];
            }
            case 3:
            {
                return (m_bytes[2] << 16U) | (m_bytes[1] << 8U) | m_bytes[0];
            }
            case 4:
            {
                return read_le_dword(m_bytes);
            }
            case 5:
            {
                uint32_t l = read_le_dword(m_bytes);
                uint32_t h = m_bytes[4];
                return static_cast<uint64_t>(l) | (static_cast<uint64_t>(h) << 32U);
            }
            case 6:
            {
                uint32_t l = read_le_dword(m_bytes);
                uint32_t h = (m_bytes[5] << 8U) | m_bytes[4];
                return static_cast<uint64_t>(l) | (static_cast<uint64_t>(h) << 32U);
            }
            case 7:
            {
                uint32_t l = read_le_dword(m_bytes);
                uint32_t h = (m_bytes[6] << 16U) | (m_bytes[5] << 8U) | m_bytes[4];
                return static_cast<uint64_t>(l) | (static_cast<uint64_t>(h) << 32U);
            }
            case 8:
            {
                uint32_t l = read_le_dword(m_bytes);
                uint32_t h = read_le_dword(m_bytes + 4);
                return static_cast<uint64_t>(l) | (static_cast<uint64_t>(h) << 32U);
            }
            default:
            {
                assert(0);
                return 0;
            }
            }
        }
    };
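
    // Usage sketch (hypothetical struct, for illustration only): packed_uint is intended for
    // declaring byte-aligned, endian-independent fields in serialized file structures.
    //
    //   struct example_header
    //   {
    //       packed_uint<4> m_total_size;   // 32-bit little endian value, 1-byte aligned
    //       packed_uint<2> m_num_slices;   // 16-bit little endian value
    //   };
    //
    //   example_header h;
    //   h.m_total_size = 1024;             // stored as bytes 00 04 00 00
    //   uint32_t n = h.m_num_slices;       // converts back to an integer on read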

    enum eZero { cZero };
    enum eNoClamp { cNoClamp };

    // Rice/Huffman entropy coding

    // This is basically Deflate-style canonical Huffman, except we allow for a lot more symbols.
    enum
    {
        cHuffmanMaxSupportedCodeSize = 16, cHuffmanMaxSupportedInternalCodeSize = 31,
        cHuffmanFastLookupBits = 10,
        cHuffmanMaxSymsLog2 = 14, cHuffmanMaxSyms = 1 << cHuffmanMaxSymsLog2,

        // Small zero runs
        cHuffmanSmallZeroRunSizeMin = 3, cHuffmanSmallZeroRunSizeMax = 10, cHuffmanSmallZeroRunExtraBits = 3,

        // Big zero run
        cHuffmanBigZeroRunSizeMin = 11, cHuffmanBigZeroRunSizeMax = 138, cHuffmanBigZeroRunExtraBits = 7,

        // Small non-zero run
        cHuffmanSmallRepeatSizeMin = 3, cHuffmanSmallRepeatSizeMax = 6, cHuffmanSmallRepeatExtraBits = 2,

        // Big non-zero run
        cHuffmanBigRepeatSizeMin = 7, cHuffmanBigRepeatSizeMax = 134, cHuffmanBigRepeatExtraBits = 7,

        cHuffmanTotalCodelengthCodes = 21, cHuffmanSmallZeroRunCode = 17, cHuffmanBigZeroRunCode = 18, cHuffmanSmallRepeatCode = 19, cHuffmanBigRepeatCode = 20
    };

    static const uint8_t g_huffman_sorted_codelength_codes[] = { cHuffmanSmallZeroRunCode, cHuffmanBigZeroRunCode, cHuffmanSmallRepeatCode, cHuffmanBigRepeatCode, 0, 8, 7, 9, 6, 0xA, 5, 0xB, 4, 0xC, 3, 0xD, 2, 0xE, 1, 0xF, 0x10 };
    const uint32_t cHuffmanTotalSortedCodelengthCodes = sizeof(g_huffman_sorted_codelength_codes) / sizeof(g_huffman_sorted_codelength_codes[0]);
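
    // Sketch of how the run codes above are used (the encoder/decoder sources are authoritative):
    // as in Deflate, a run of N zero code lengths with cHuffmanSmallZeroRunSizeMin <= N <=
    // cHuffmanSmallZeroRunSizeMax is sent as the symbol cHuffmanSmallZeroRunCode followed by
    // (N - cHuffmanSmallZeroRunSizeMin) stored in cHuffmanSmallZeroRunExtraBits extra bits; longer
    // zero runs use cHuffmanBigZeroRunCode, and the two repeat codes do the same for runs of a
    // repeated non-zero code length.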

    // GPU texture formats

    enum class texture_format
    {
        cInvalidTextureFormat = -1,

        // Block-based formats
        cETC1,          // ETC1
        cETC1S,         // ETC1 (subset: diff colors only, no subblocks)
        cETC2_RGB,      // ETC2 color block (basisu doesn't support ETC2 planar/T/H modes - just basic ETC1)
        cETC2_RGBA,     // ETC2 EAC alpha block followed by ETC2 color block
        cETC2_ALPHA,    // ETC2 EAC alpha block
        cBC1,           // DXT1
        cBC3,           // DXT5 (BC4/DXT5A block followed by a BC1/DXT1 block)
        cBC4,           // DXT5A
        cBC5,           // 3DC/DXN (two BC4/DXT5A blocks)
        cBC7,
        cASTC4x4,       // LDR only
        cPVRTC1_4_RGB,
        cPVRTC1_4_RGBA,
        cATC_RGB,
        cATC_RGBA_INTERPOLATED_ALPHA,
        cFXT1_RGB,
        cPVRTC2_4_RGBA,
        cETC2_R11_EAC,
        cETC2_RG11_EAC,
        cUASTC4x4,
        cBC1_NV,
        cBC1_AMD,

        // Uncompressed/raw pixels
        cRGBA32,
        cRGB565,
        cBGR565,
        cRGBA4444,
        cABGR4444
    };

    inline uint32_t get_bytes_per_block(texture_format fmt)
    {
        switch (fmt)
        {
        case texture_format::cETC1:
        case texture_format::cETC1S:
        case texture_format::cETC2_RGB:
        case texture_format::cETC2_ALPHA:
        case texture_format::cBC1:
        case texture_format::cBC1_NV:
        case texture_format::cBC1_AMD:
        case texture_format::cBC4:
        case texture_format::cPVRTC1_4_RGB:
        case texture_format::cPVRTC1_4_RGBA:
        case texture_format::cATC_RGB:
        case texture_format::cPVRTC2_4_RGBA:
        case texture_format::cETC2_R11_EAC:
            return 8;
        case texture_format::cRGBA32:
            return sizeof(uint32_t) * 16;
        default:
            break;
        }
        return 16;
    }

    inline uint32_t get_qwords_per_block(texture_format fmt)
    {
        return get_bytes_per_block(fmt) >> 3;
    }
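
    // For example: get_bytes_per_block(texture_format::cBC1) == 8 and get_bytes_per_block(texture_format::cBC7) == 16,
    // while for uncompressed cRGBA32 a "block" is a 4x4 group of 32-bit pixels (64 bytes).
    // get_qwords_per_block() is the same value in 64-bit words, e.g. 1 for BC1 and 2 for BC7.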

    inline uint32_t get_block_width(texture_format fmt)
    {
        BASISU_NOTE_UNUSED(fmt);
        switch (fmt)
        {
        case texture_format::cFXT1_RGB:
            return 8;
        default:
            break;
        }
        return 4;
    }

    inline uint32_t get_block_height(texture_format fmt)
    {
        BASISU_NOTE_UNUSED(fmt);
        return 4;
    }

} // namespace basisu