1 | /* ****************************************************************** |
2 | * Huffman encoder, part of New Generation Entropy library |
3 | * Copyright (c) Meta Platforms, Inc. and affiliates. |
4 | * |
5 | * You can contact the author at : |
6 | * - FSE+HUF source repository : https://github.com/Cyan4973/FiniteStateEntropy |
7 | * - Public forum : https://groups.google.com/forum/#!forum/lz4c |
8 | * |
9 | * This source code is licensed under both the BSD-style license (found in the |
10 | * LICENSE file in the root directory of this source tree) and the GPLv2 (found |
11 | * in the COPYING file in the root directory of this source tree). |
12 | * You may select, at your option, one of the above-listed licenses. |
13 | ****************************************************************** */ |
14 | |
15 | /* ************************************************************** |
16 | * Compiler specifics |
17 | ****************************************************************/ |
18 | #ifdef _MSC_VER /* Visual Studio */ |
19 | # pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */ |
20 | #endif |
21 | |
22 | |
23 | /* ************************************************************** |
24 | * Includes |
25 | ****************************************************************/ |
26 | #include "../common/zstd_deps.h" /* ZSTD_memcpy, ZSTD_memset */ |
27 | #include "../common/compiler.h" |
28 | #include "../common/bitstream.h" |
29 | #include "hist.h" |
30 | #define FSE_STATIC_LINKING_ONLY /* FSE_optimalTableLog_internal */ |
31 | #include "../common/fse.h" /* header compression */ |
32 | #include "../common/huf.h" |
33 | #include "../common/error_private.h" |
34 | #include "../common/bits.h" /* ZSTD_highbit32 */ |
35 | |
36 | |
37 | /* ************************************************************** |
38 | * Error Management |
39 | ****************************************************************/ |
40 | #define HUF_isError ERR_isError |
41 | #define HUF_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c) /* use only *after* variable declarations */ |
42 | |
43 | |
44 | /* ************************************************************** |
45 | * Required declarations |
46 | ****************************************************************/ |
47 | typedef struct nodeElt_s { |
48 | U32 count; |
49 | U16 parent; |
50 | BYTE byte; |
51 | BYTE nbBits; |
52 | } nodeElt; |
53 | |
54 | |
55 | /* ************************************************************** |
56 | * Debug Traces |
57 | ****************************************************************/ |
58 | |
59 | #if DEBUGLEVEL >= 2 |
60 | |
61 | static size_t showU32(const U32* arr, size_t size) |
62 | { |
63 | size_t u; |
64 | for (u=0; u<size; u++) { |
        RAWLOG(6, " %u", arr[u]); (void)arr;
    }
    RAWLOG(6, " \n");
68 | return size; |
69 | } |
70 | |
71 | static size_t HUF_getNbBits(HUF_CElt elt); |
72 | |
73 | static size_t showCTableBits(const HUF_CElt* ctable, size_t size) |
74 | { |
75 | size_t u; |
76 | for (u=0; u<size; u++) { |
        RAWLOG(6, " %zu", HUF_getNbBits(ctable[u])); (void)ctable;
    }
    RAWLOG(6, " \n");
    return size;
}
83 | |
84 | static size_t showHNodeSymbols(const nodeElt* hnode, size_t size) |
85 | { |
86 | size_t u; |
87 | for (u=0; u<size; u++) { |
        RAWLOG(6, " %u", hnode[u].byte); (void)hnode;
    }
    RAWLOG(6, " \n");
91 | return size; |
92 | } |
93 | |
94 | static size_t showHNodeBits(const nodeElt* hnode, size_t size) |
95 | { |
96 | size_t u; |
97 | for (u=0; u<size; u++) { |
        RAWLOG(6, " %u", hnode[u].nbBits); (void)hnode;
    }
    RAWLOG(6, " \n");
101 | return size; |
102 | } |
103 | |
104 | #endif |
105 | |
106 | |
107 | /* ******************************************************* |
108 | * HUF : Huffman block compression |
109 | *********************************************************/ |
110 | #define HUF_WORKSPACE_MAX_ALIGNMENT 8 |
111 | |
112 | static void* HUF_alignUpWorkspace(void* workspace, size_t* workspaceSizePtr, size_t align) |
113 | { |
114 | size_t const mask = align - 1; |
115 | size_t const rem = (size_t)workspace & mask; |
116 | size_t const add = (align - rem) & mask; |
117 | BYTE* const aligned = (BYTE*)workspace + add; |
118 | assert((align & (align - 1)) == 0); /* pow 2 */ |
119 | assert(align <= HUF_WORKSPACE_MAX_ALIGNMENT); |
120 | if (*workspaceSizePtr >= add) { |
121 | assert(add < align); |
122 | assert(((size_t)aligned & mask) == 0); |
123 | *workspaceSizePtr -= add; |
124 | return aligned; |
125 | } else { |
126 | *workspaceSizePtr = 0; |
127 | return NULL; |
128 | } |
129 | } |
130 | |
131 | |
132 | /* HUF_compressWeights() : |
133 | * Same as FSE_compress(), but dedicated to huff0's weights compression. |
134 | * The use case needs much less stack memory. |
135 | * Note : all elements within weightTable are supposed to be <= HUF_TABLELOG_MAX. |
136 | */ |
#define MAX_FSE_TABLELOG_FOR_HUFF_HEADER 6
138 | |
139 | typedef struct { |
140 | FSE_CTable CTable[FSE_CTABLE_SIZE_U32(MAX_FSE_TABLELOG_FOR_HUFF_HEADER, HUF_TABLELOG_MAX)]; |
141 | U32 scratchBuffer[FSE_BUILD_CTABLE_WORKSPACE_SIZE_U32(HUF_TABLELOG_MAX, MAX_FSE_TABLELOG_FOR_HUFF_HEADER)]; |
142 | unsigned count[HUF_TABLELOG_MAX+1]; |
143 | S16 norm[HUF_TABLELOG_MAX+1]; |
144 | } HUF_CompressWeightsWksp; |
145 | |
146 | static size_t |
147 | HUF_compressWeights(void* dst, size_t dstSize, |
148 | const void* weightTable, size_t wtSize, |
149 | void* workspace, size_t workspaceSize) |
150 | { |
151 | BYTE* const ostart = (BYTE*) dst; |
152 | BYTE* op = ostart; |
153 | BYTE* const oend = ostart + dstSize; |
154 | |
155 | unsigned maxSymbolValue = HUF_TABLELOG_MAX; |
156 | U32 tableLog = MAX_FSE_TABLELOG_FOR_HUFF_HEADER; |
157 | HUF_CompressWeightsWksp* wksp = (HUF_CompressWeightsWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); |
158 | |
159 | if (workspaceSize < sizeof(HUF_CompressWeightsWksp)) return ERROR(GENERIC); |
160 | |
161 | /* init conditions */ |
162 | if (wtSize <= 1) return 0; /* Not compressible */ |
163 | |
164 | /* Scan input and build symbol stats */ |
165 | { unsigned const maxCount = HIST_count_simple(wksp->count, &maxSymbolValue, weightTable, wtSize); /* never fails */ |
166 | if (maxCount == wtSize) return 1; /* only a single symbol in src : rle */ |
167 | if (maxCount == 1) return 0; /* each symbol present maximum once => not compressible */ |
168 | } |
169 | |
170 | tableLog = FSE_optimalTableLog(tableLog, wtSize, maxSymbolValue); |
171 | CHECK_F( FSE_normalizeCount(wksp->norm, tableLog, wksp->count, wtSize, maxSymbolValue, /* useLowProbCount */ 0) ); |
172 | |
173 | /* Write table description header */ |
174 | { CHECK_V_F(hSize, FSE_writeNCount(op, (size_t)(oend-op), wksp->norm, maxSymbolValue, tableLog) ); |
175 | op += hSize; |
176 | } |
177 | |
178 | /* Compress */ |
179 | CHECK_F( FSE_buildCTable_wksp(wksp->CTable, wksp->norm, maxSymbolValue, tableLog, wksp->scratchBuffer, sizeof(wksp->scratchBuffer)) ); |
180 | { CHECK_V_F(cSize, FSE_compress_usingCTable(op, (size_t)(oend - op), weightTable, wtSize, wksp->CTable) ); |
181 | if (cSize == 0) return 0; /* not enough space for compressed data */ |
182 | op += cSize; |
183 | } |
184 | |
185 | return (size_t)(op-ostart); |
186 | } |
187 | |
188 | static size_t HUF_getNbBits(HUF_CElt elt) |
189 | { |
190 | return elt & 0xFF; |
191 | } |
192 | |
193 | static size_t HUF_getNbBitsFast(HUF_CElt elt) |
194 | { |
195 | return elt; |
196 | } |
197 | |
198 | static size_t HUF_getValue(HUF_CElt elt) |
199 | { |
200 | return elt & ~(size_t)0xFF; |
201 | } |
202 | |
203 | static size_t HUF_getValueFast(HUF_CElt elt) |
204 | { |
205 | return elt; |
206 | } |
207 | |
208 | static void HUF_setNbBits(HUF_CElt* elt, size_t nbBits) |
209 | { |
210 | assert(nbBits <= HUF_TABLELOG_ABSOLUTEMAX); |
211 | *elt = nbBits; |
212 | } |
213 | |
214 | static void HUF_setValue(HUF_CElt* elt, size_t value) |
215 | { |
216 | size_t const nbBits = HUF_getNbBits(*elt); |
217 | if (nbBits > 0) { |
218 | assert((value >> nbBits) == 0); |
219 | *elt |= value << (sizeof(HUF_CElt) * 8 - nbBits); |
220 | } |
221 | } |
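
/* Illustrative sketch (not part of the library) of the HUF_CElt layout used by
 * the getters/setters above, assuming a 64-bit size_t:
 *
 *     HUF_CElt elt = 0;
 *     HUF_setNbBits(&elt, 3);     bits [0,8)   now hold nbBits == 3
 *     HUF_setValue(&elt, 0x5);    bits [61,64) now hold the value 0b101
 *
 * giving elt == 0xA000000000000003, with HUF_getNbBits(elt) == 3 and
 * HUF_getValue(elt) == 0xA000000000000000 (the value is kept pre-shifted to
 * the top of the word, ready to be OR-ed into the bit container). */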
222 | |
223 | typedef struct { |
224 | HUF_CompressWeightsWksp wksp; |
225 | BYTE bitsToWeight[HUF_TABLELOG_MAX + 1]; /* precomputed conversion table */ |
226 | BYTE huffWeight[HUF_SYMBOLVALUE_MAX]; |
227 | } HUF_WriteCTableWksp; |
228 | |
229 | size_t HUF_writeCTable_wksp(void* dst, size_t maxDstSize, |
230 | const HUF_CElt* CTable, unsigned maxSymbolValue, unsigned huffLog, |
231 | void* workspace, size_t workspaceSize) |
232 | { |
233 | HUF_CElt const* const ct = CTable + 1; |
234 | BYTE* op = (BYTE*)dst; |
235 | U32 n; |
236 | HUF_WriteCTableWksp* wksp = (HUF_WriteCTableWksp*)HUF_alignUpWorkspace(workspace, &workspaceSize, ZSTD_ALIGNOF(U32)); |
237 | |
238 | HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE >= sizeof(HUF_WriteCTableWksp)); |
239 | |
240 | /* check conditions */ |
241 | if (workspaceSize < sizeof(HUF_WriteCTableWksp)) return ERROR(GENERIC); |
242 | if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); |
243 | |
244 | /* convert to weight */ |
245 | wksp->bitsToWeight[0] = 0; |
246 | for (n=1; n<huffLog+1; n++) |
247 | wksp->bitsToWeight[n] = (BYTE)(huffLog + 1 - n); |
248 | for (n=0; n<maxSymbolValue; n++) |
249 | wksp->huffWeight[n] = wksp->bitsToWeight[HUF_getNbBits(ct[n])]; |
250 | |
251 | /* attempt weights compression by FSE */ |
252 | if (maxDstSize < 1) return ERROR(dstSize_tooSmall); |
253 | { CHECK_V_F(hSize, HUF_compressWeights(op+1, maxDstSize-1, wksp->huffWeight, maxSymbolValue, &wksp->wksp, sizeof(wksp->wksp)) ); |
254 | if ((hSize>1) & (hSize < maxSymbolValue/2)) { /* FSE compressed */ |
255 | op[0] = (BYTE)hSize; |
256 | return hSize+1; |
257 | } } |
258 | |
259 | /* write raw values as 4-bits (max : 15) */ |
260 | if (maxSymbolValue > (256-128)) return ERROR(GENERIC); /* should not happen : likely means source cannot be compressed */ |
261 | if (((maxSymbolValue+1)/2) + 1 > maxDstSize) return ERROR(dstSize_tooSmall); /* not enough space within dst buffer */ |
262 | op[0] = (BYTE)(128 /*special case*/ + (maxSymbolValue-1)); |
263 | wksp->huffWeight[maxSymbolValue] = 0; /* to be sure it doesn't cause msan issue in final combination */ |
264 | for (n=0; n<maxSymbolValue; n+=2) |
265 | op[(n/2)+1] = (BYTE)((wksp->huffWeight[n] << 4) + wksp->huffWeight[n+1]); |
266 | return ((maxSymbolValue+1)/2) + 1; |
267 | } |
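
/* Illustrative example (not part of the library) of the header produced by
 * HUF_writeCTable_wksp when FSE compression of the weights is not worthwhile
 * and the raw 4-bit path is taken. Assume maxSymbolValue == 3 with stored
 * weights {2, 1, 1} for symbols 0..2 (the last symbol's weight is implied):
 *
 *     op[0] = 128 + (3 - 1) = 130    raw-weights marker, encodes weight count
 *     op[1] = (2 << 4) | 1  = 0x21   weights of symbols 0 and 1
 *     op[2] = (1 << 4) | 0  = 0x10   weight of symbol 2, padded with 0
 *     total header size = ((3+1)/2) + 1 = 3 bytes
 *
 * When the FSE path is taken instead, op[0] holds the compressed weights size
 * (a value < 128) and the FSE-compressed weights follow. */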
268 | |
269 | |
270 | size_t HUF_readCTable (HUF_CElt* CTable, unsigned* maxSymbolValuePtr, const void* src, size_t srcSize, unsigned* hasZeroWeights) |
271 | { |
272 | BYTE huffWeight[HUF_SYMBOLVALUE_MAX + 1]; /* init not required, even though some static analyzer may complain */ |
273 | U32 rankVal[HUF_TABLELOG_ABSOLUTEMAX + 1]; /* large enough for values from 0 to 16 */ |
274 | U32 tableLog = 0; |
275 | U32 nbSymbols = 0; |
276 | HUF_CElt* const ct = CTable + 1; |
277 | |
278 | /* get symbol weights */ |
279 | CHECK_V_F(readSize, HUF_readStats(huffWeight, HUF_SYMBOLVALUE_MAX+1, rankVal, &nbSymbols, &tableLog, src, srcSize)); |
280 | *hasZeroWeights = (rankVal[0] > 0); |
281 | |
282 | /* check result */ |
283 | if (tableLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); |
284 | if (nbSymbols > *maxSymbolValuePtr+1) return ERROR(maxSymbolValue_tooSmall); |
285 | |
286 | CTable[0] = tableLog; |
287 | |
288 | /* Prepare base value per rank */ |
    { U32 n, nextRankStart = 0;
290 | for (n=1; n<=tableLog; n++) { |
291 | U32 curr = nextRankStart; |
292 | nextRankStart += (rankVal[n] << (n-1)); |
293 | rankVal[n] = curr; |
294 | } } |
295 | |
296 | /* fill nbBits */ |
297 | { U32 n; for (n=0; n<nbSymbols; n++) { |
298 | const U32 w = huffWeight[n]; |
299 | HUF_setNbBits(ct + n, (BYTE)(tableLog + 1 - w) & -(w != 0)); |
300 | } } |
301 | |
302 | /* fill val */ |
303 | { U16 nbPerRank[HUF_TABLELOG_MAX+2] = {0}; /* support w=0=>n=tableLog+1 */ |
304 | U16 valPerRank[HUF_TABLELOG_MAX+2] = {0}; |
305 | { U32 n; for (n=0; n<nbSymbols; n++) nbPerRank[HUF_getNbBits(ct[n])]++; } |
        /* determine starting value per rank */
307 | valPerRank[tableLog+1] = 0; /* for w==0 */ |
308 | { U16 min = 0; |
309 | U32 n; for (n=tableLog; n>0; n--) { /* start at n=tablelog <-> w=1 */ |
310 | valPerRank[n] = min; /* get starting value within each rank */ |
311 | min += nbPerRank[n]; |
312 | min >>= 1; |
313 | } } |
314 | /* assign value within rank, symbol order */ |
315 | { U32 n; for (n=0; n<nbSymbols; n++) HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); } |
316 | } |
317 | |
318 | *maxSymbolValuePtr = nbSymbols - 1; |
319 | return readSize; |
320 | } |
321 | |
322 | U32 HUF_getNbBitsFromCTable(HUF_CElt const* CTable, U32 symbolValue) |
323 | { |
324 | const HUF_CElt* const ct = CTable + 1; |
325 | assert(symbolValue <= HUF_SYMBOLVALUE_MAX); |
326 | return (U32)HUF_getNbBits(ct[symbolValue]); |
327 | } |
328 | |
329 | |
330 | /** |
331 | * HUF_setMaxHeight(): |
332 | * Try to enforce @targetNbBits on the Huffman tree described in @huffNode. |
333 | * |
334 | * It attempts to convert all nodes with nbBits > @targetNbBits |
335 | * to employ @targetNbBits instead. Then it adjusts the tree |
336 | * so that it remains a valid canonical Huffman tree. |
337 | * |
338 | * @pre The sum of the ranks of each symbol == 2^largestBits, |
339 | * where largestBits == huffNode[lastNonNull].nbBits. |
340 | * @post The sum of the ranks of each symbol == 2^largestBits, |
341 | * where largestBits is the return value (expected <= targetNbBits). |
342 | * |
343 | * @param huffNode The Huffman tree modified in place to enforce targetNbBits. |
344 | * It's presumed sorted, from most frequent to rarest symbol. |
345 | * @param lastNonNull The symbol with the lowest count in the Huffman tree. |
346 | * @param targetNbBits The allowed number of bits, which the Huffman tree |
347 | * may not respect. After this function the Huffman tree will |
348 | * respect targetNbBits. |
349 | * @return The maximum number of bits of the Huffman tree after adjustment. |
350 | */ |
351 | static U32 HUF_setMaxHeight(nodeElt* huffNode, U32 lastNonNull, U32 targetNbBits) |
352 | { |
353 | const U32 largestBits = huffNode[lastNonNull].nbBits; |
354 | /* early exit : no elt > targetNbBits, so the tree is already valid. */ |
355 | if (largestBits <= targetNbBits) return largestBits; |
356 | |
    DEBUGLOG(5, "HUF_setMaxHeight (targetNbBits = %u)", targetNbBits);
358 | |
359 | /* there are several too large elements (at least >= 2) */ |
360 | { int totalCost = 0; |
361 | const U32 baseCost = 1 << (largestBits - targetNbBits); |
362 | int n = (int)lastNonNull; |
363 | |
        /* Adjust any ranks > targetNbBits down to targetNbBits.
         * Compute totalCost, which measures how far the sum of the ranks
         * exceeds 2^largestBits after the offending ranks are adjusted.
         */
368 | while (huffNode[n].nbBits > targetNbBits) { |
369 | totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits)); |
370 | huffNode[n].nbBits = (BYTE)targetNbBits; |
371 | n--; |
372 | } |
373 | /* n stops at huffNode[n].nbBits <= targetNbBits */ |
374 | assert(huffNode[n].nbBits <= targetNbBits); |
        /* n ends at the index of the smallest symbol using < targetNbBits */
376 | while (huffNode[n].nbBits == targetNbBits) --n; |
377 | |
378 | /* renorm totalCost from 2^largestBits to 2^targetNbBits |
379 | * note : totalCost is necessarily a multiple of baseCost */ |
380 | assert(((U32)totalCost & (baseCost - 1)) == 0); |
381 | totalCost >>= (largestBits - targetNbBits); |
382 | assert(totalCost > 0); |
383 | |
384 | /* repay normalized cost */ |
385 | { U32 const noSymbol = 0xF0F0F0F0; |
386 | U32 rankLast[HUF_TABLELOG_MAX+2]; |
387 | |
388 | /* Get pos of last (smallest = lowest cum. count) symbol per rank */ |
389 | ZSTD_memset(rankLast, 0xF0, sizeof(rankLast)); |
390 | { U32 currentNbBits = targetNbBits; |
391 | int pos; |
392 | for (pos=n ; pos >= 0; pos--) { |
393 | if (huffNode[pos].nbBits >= currentNbBits) continue; |
394 | currentNbBits = huffNode[pos].nbBits; /* < targetNbBits */ |
395 | rankLast[targetNbBits-currentNbBits] = (U32)pos; |
396 | } } |
397 | |
398 | while (totalCost > 0) { |
399 | /* Try to reduce the next power of 2 above totalCost because we |
400 | * gain back half the rank. |
401 | */ |
402 | U32 nBitsToDecrease = ZSTD_highbit32((U32)totalCost) + 1; |
403 | for ( ; nBitsToDecrease > 1; nBitsToDecrease--) { |
404 | U32 const highPos = rankLast[nBitsToDecrease]; |
405 | U32 const lowPos = rankLast[nBitsToDecrease-1]; |
406 | if (highPos == noSymbol) continue; |
407 | /* Decrease highPos if no symbols of lowPos or if it is |
408 | * not cheaper to remove 2 lowPos than highPos. |
409 | */ |
410 | if (lowPos == noSymbol) break; |
411 | { U32 const highTotal = huffNode[highPos].count; |
412 | U32 const lowTotal = 2 * huffNode[lowPos].count; |
413 | if (highTotal <= lowTotal) break; |
414 | } } |
415 | /* only triggered when no more rank 1 symbol left => find closest one (note : there is necessarily at least one !) */ |
416 | assert(rankLast[nBitsToDecrease] != noSymbol || nBitsToDecrease == 1); |
417 | /* HUF_MAX_TABLELOG test just to please gcc 5+; but it should not be necessary */ |
418 | while ((nBitsToDecrease<=HUF_TABLELOG_MAX) && (rankLast[nBitsToDecrease] == noSymbol)) |
419 | nBitsToDecrease++; |
420 | assert(rankLast[nBitsToDecrease] != noSymbol); |
421 | /* Increase the number of bits to gain back half the rank cost. */ |
422 | totalCost -= 1 << (nBitsToDecrease-1); |
423 | huffNode[rankLast[nBitsToDecrease]].nbBits++; |
424 | |
425 | /* Fix up the new rank. |
426 | * If the new rank was empty, this symbol is now its smallest. |
427 | * Otherwise, this symbol will be the largest in the new rank so no adjustment. |
428 | */ |
429 | if (rankLast[nBitsToDecrease-1] == noSymbol) |
430 | rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]; |
431 | /* Fix up the old rank. |
432 | * If the symbol was at position 0, meaning it was the highest weight symbol in the tree, |
433 | * it must be the only symbol in its rank, so the old rank now has no symbols. |
434 | * Otherwise, since the Huffman nodes are sorted by count, the previous position is now |
435 | * the smallest node in the rank. If the previous position belongs to a different rank, |
436 | * then the rank is now empty. |
437 | */ |
438 | if (rankLast[nBitsToDecrease] == 0) /* special case, reached largest symbol */ |
439 | rankLast[nBitsToDecrease] = noSymbol; |
440 | else { |
441 | rankLast[nBitsToDecrease]--; |
442 | if (huffNode[rankLast[nBitsToDecrease]].nbBits != targetNbBits-nBitsToDecrease) |
443 | rankLast[nBitsToDecrease] = noSymbol; /* this rank is now empty */ |
444 | } |
445 | } /* while (totalCost > 0) */ |
446 | |
447 | /* If we've removed too much weight, then we have to add it back. |
448 | * To avoid overshooting again, we only adjust the smallest rank. |
449 | * We take the largest nodes from the lowest rank 0 and move them |
450 | * to rank 1. There's guaranteed to be enough rank 0 symbols because |
451 | * TODO. |
452 | */ |
453 | while (totalCost < 0) { /* Sometimes, cost correction overshoot */ |
454 | /* special case : no rank 1 symbol (using targetNbBits-1); |
455 | * let's create one from largest rank 0 (using targetNbBits). |
456 | */ |
457 | if (rankLast[1] == noSymbol) { |
458 | while (huffNode[n].nbBits == targetNbBits) n--; |
459 | huffNode[n+1].nbBits--; |
460 | assert(n >= 0); |
461 | rankLast[1] = (U32)(n+1); |
462 | totalCost++; |
463 | continue; |
464 | } |
465 | huffNode[ rankLast[1] + 1 ].nbBits--; |
466 | rankLast[1]++; |
467 | totalCost ++; |
468 | } |
469 | } /* repay normalized cost */ |
470 | } /* there are several too large elements (at least >= 2) */ |
471 | |
472 | return targetNbBits; |
473 | } |
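
/* Worked example (illustrative only) of the cost accounting above. Suppose
 * largestBits == 12, targetNbBits == 11, and exactly two symbols use 12 bits.
 * Capping each of them at 11 bits adds baseCost - (1 << (12-12)) = 2 - 1 = 1
 * to totalCost, so totalCost == 2 at the 2^largestBits scale, which
 * renormalizes to 1 at the 2^targetNbBits scale. That cost is repaid by
 * taking one symbol currently at targetNbBits-1 == 10 bits and giving it one
 * more bit (11), which recovers 1 << (nBitsToDecrease-1) == 1 and restores
 * the Kraft sum to exactly 2^targetNbBits. */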
474 | |
475 | typedef struct { |
476 | U16 base; |
477 | U16 curr; |
478 | } rankPos; |
479 | |
480 | typedef nodeElt huffNodeTable[2 * (HUF_SYMBOLVALUE_MAX + 1)]; |
481 | |
482 | /* Number of buckets available for HUF_sort() */ |
483 | #define RANK_POSITION_TABLE_SIZE 192 |
484 | |
485 | typedef struct { |
486 | huffNodeTable huffNodeTbl; |
487 | rankPos rankPosition[RANK_POSITION_TABLE_SIZE]; |
488 | } HUF_buildCTable_wksp_tables; |
489 | |
490 | /* RANK_POSITION_DISTINCT_COUNT_CUTOFF == Cutoff point in HUF_sort() buckets for which we use log2 bucketing. |
491 | * Strategy is to use as many buckets as possible for representing distinct |
492 | * counts while using the remainder to represent all "large" counts. |
493 | * |
494 | * To satisfy this requirement for 192 buckets, we can do the following: |
495 | * Let buckets 0-166 represent distinct counts of [0, 166] |
496 | * Let buckets 166 to 192 represent all remaining counts up to RANK_POSITION_MAX_COUNT_LOG using log2 bucketing. |
497 | */ |
498 | #define RANK_POSITION_MAX_COUNT_LOG 32 |
499 | #define RANK_POSITION_LOG_BUCKETS_BEGIN ((RANK_POSITION_TABLE_SIZE - 1) - RANK_POSITION_MAX_COUNT_LOG - 1 /* == 158 */) |
500 | #define RANK_POSITION_DISTINCT_COUNT_CUTOFF (RANK_POSITION_LOG_BUCKETS_BEGIN + ZSTD_highbit32(RANK_POSITION_LOG_BUCKETS_BEGIN) /* == 166 */) |
501 | |
502 | /* Return the appropriate bucket index for a given count. See definition of |
503 | * RANK_POSITION_DISTINCT_COUNT_CUTOFF for explanation of bucketing strategy. |
504 | */ |
505 | static U32 HUF_getIndex(U32 const count) { |
506 | return (count < RANK_POSITION_DISTINCT_COUNT_CUTOFF) |
507 | ? count |
508 | : ZSTD_highbit32(count) + RANK_POSITION_LOG_BUCKETS_BEGIN; |
509 | } |
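
/* Illustrative values (not part of the library): small counts map to their
 * own bucket, so HUF_getIndex(0) == 0 and HUF_getIndex(5) == 5, while every
 * count at or above RANK_POSITION_DISTINCT_COUNT_CUTOFF shares a bucket with
 * all other counts having the same highest set bit, e.g. all counts in
 * [1<<20, 1<<21) land in bucket 20 + RANK_POSITION_LOG_BUCKETS_BEGIN. */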
510 | |
511 | /* Helper swap function for HUF_quickSortPartition() */ |
512 | static void HUF_swapNodes(nodeElt* a, nodeElt* b) { |
513 | nodeElt tmp = *a; |
514 | *a = *b; |
515 | *b = tmp; |
516 | } |
517 | |
518 | /* Returns 0 if the huffNode array is not sorted by descending count */ |
519 | MEM_STATIC int HUF_isSorted(nodeElt huffNode[], U32 const maxSymbolValue1) { |
520 | U32 i; |
521 | for (i = 1; i < maxSymbolValue1; ++i) { |
522 | if (huffNode[i].count > huffNode[i-1].count) { |
523 | return 0; |
524 | } |
525 | } |
526 | return 1; |
527 | } |
528 | |
529 | /* Insertion sort by descending order */ |
530 | HINT_INLINE void HUF_insertionSort(nodeElt huffNode[], int const low, int const high) { |
531 | int i; |
532 | int const size = high-low+1; |
533 | huffNode += low; |
534 | for (i = 1; i < size; ++i) { |
535 | nodeElt const key = huffNode[i]; |
536 | int j = i - 1; |
537 | while (j >= 0 && huffNode[j].count < key.count) { |
538 | huffNode[j + 1] = huffNode[j]; |
539 | j--; |
540 | } |
541 | huffNode[j + 1] = key; |
542 | } |
543 | } |
544 | |
545 | /* Pivot helper function for quicksort. */ |
546 | static int HUF_quickSortPartition(nodeElt arr[], int const low, int const high) { |
547 | /* Simply select rightmost element as pivot. "Better" selectors like |
548 | * median-of-three don't experimentally appear to have any benefit. |
549 | */ |
550 | U32 const pivot = arr[high].count; |
551 | int i = low - 1; |
552 | int j = low; |
553 | for ( ; j < high; j++) { |
554 | if (arr[j].count > pivot) { |
555 | i++; |
556 | HUF_swapNodes(&arr[i], &arr[j]); |
557 | } |
558 | } |
559 | HUF_swapNodes(&arr[i + 1], &arr[high]); |
560 | return i + 1; |
561 | } |
562 | |
563 | /* Classic quicksort by descending with partially iterative calls |
564 | * to reduce worst case callstack size. |
565 | */ |
566 | static void HUF_simpleQuickSort(nodeElt arr[], int low, int high) { |
567 | int const kInsertionSortThreshold = 8; |
568 | if (high - low < kInsertionSortThreshold) { |
569 | HUF_insertionSort(arr, low, high); |
570 | return; |
571 | } |
572 | while (low < high) { |
573 | int const idx = HUF_quickSortPartition(arr, low, high); |
574 | if (idx - low < high - idx) { |
575 | HUF_simpleQuickSort(arr, low, idx - 1); |
576 | low = idx + 1; |
577 | } else { |
578 | HUF_simpleQuickSort(arr, idx + 1, high); |
579 | high = idx - 1; |
580 | } |
581 | } |
582 | } |
583 | |
584 | /** |
585 | * HUF_sort(): |
586 | * Sorts the symbols [0, maxSymbolValue] by count[symbol] in decreasing order. |
587 | * This is a typical bucket sorting strategy that uses either quicksort or insertion sort to sort each bucket. |
588 | * |
589 | * @param[out] huffNode Sorted symbols by decreasing count. Only members `.count` and `.byte` are filled. |
590 | * Must have (maxSymbolValue + 1) entries. |
591 | * @param[in] count Histogram of the symbols. |
592 | * @param[in] maxSymbolValue Maximum symbol value. |
593 | * @param rankPosition This is a scratch workspace. Must have RANK_POSITION_TABLE_SIZE entries. |
594 | */ |
595 | static void HUF_sort(nodeElt huffNode[], const unsigned count[], U32 const maxSymbolValue, rankPos rankPosition[]) { |
596 | U32 n; |
597 | U32 const maxSymbolValue1 = maxSymbolValue+1; |
598 | |
599 | /* Compute base and set curr to base. |
     * For each symbol n, let lowerRank = HUF_getIndex(count[n]) and rank = lowerRank + 1.
601 | * See HUF_getIndex to see bucketing strategy. |
602 | * We attribute each symbol to lowerRank's base value, because we want to know where |
603 | * each rank begins in the output, so for rank R we want to count ranks R+1 and above. |
604 | */ |
605 | ZSTD_memset(rankPosition, 0, sizeof(*rankPosition) * RANK_POSITION_TABLE_SIZE); |
606 | for (n = 0; n < maxSymbolValue1; ++n) { |
607 | U32 lowerRank = HUF_getIndex(count[n]); |
608 | assert(lowerRank < RANK_POSITION_TABLE_SIZE - 1); |
609 | rankPosition[lowerRank].base++; |
610 | } |
611 | |
612 | assert(rankPosition[RANK_POSITION_TABLE_SIZE - 1].base == 0); |
613 | /* Set up the rankPosition table */ |
614 | for (n = RANK_POSITION_TABLE_SIZE - 1; n > 0; --n) { |
615 | rankPosition[n-1].base += rankPosition[n].base; |
616 | rankPosition[n-1].curr = rankPosition[n-1].base; |
617 | } |
618 | |
619 | /* Insert each symbol into their appropriate bucket, setting up rankPosition table. */ |
620 | for (n = 0; n < maxSymbolValue1; ++n) { |
621 | U32 const c = count[n]; |
622 | U32 const r = HUF_getIndex(c) + 1; |
623 | U32 const pos = rankPosition[r].curr++; |
624 | assert(pos < maxSymbolValue1); |
625 | huffNode[pos].count = c; |
626 | huffNode[pos].byte = (BYTE)n; |
627 | } |
628 | |
629 | /* Sort each bucket. */ |
630 | for (n = RANK_POSITION_DISTINCT_COUNT_CUTOFF; n < RANK_POSITION_TABLE_SIZE - 1; ++n) { |
631 | int const bucketSize = rankPosition[n].curr - rankPosition[n].base; |
632 | U32 const bucketStartIdx = rankPosition[n].base; |
633 | if (bucketSize > 1) { |
634 | assert(bucketStartIdx < maxSymbolValue1); |
635 | HUF_simpleQuickSort(huffNode + bucketStartIdx, 0, bucketSize-1); |
636 | } |
637 | } |
638 | |
639 | assert(HUF_isSorted(huffNode, maxSymbolValue1)); |
640 | } |
641 | |
642 | |
643 | /** HUF_buildCTable_wksp() : |
644 | * Same as HUF_buildCTable(), but using externally allocated scratch buffer. |
645 | * `workSpace` must be aligned on 4-bytes boundaries, and be at least as large as sizeof(HUF_buildCTable_wksp_tables). |
646 | */ |
647 | #define STARTNODE (HUF_SYMBOLVALUE_MAX+1) |
648 | |
649 | /* HUF_buildTree(): |
650 | * Takes the huffNode array sorted by HUF_sort() and builds an unlimited-depth Huffman tree. |
651 | * |
652 | * @param huffNode The array sorted by HUF_sort(). Builds the Huffman tree in this array. |
653 | * @param maxSymbolValue The maximum symbol value. |
654 | * @return The smallest node in the Huffman tree (by count). |
655 | */ |
656 | static int HUF_buildTree(nodeElt* huffNode, U32 maxSymbolValue) |
657 | { |
658 | nodeElt* const huffNode0 = huffNode - 1; |
659 | int nonNullRank; |
660 | int lowS, lowN; |
661 | int nodeNb = STARTNODE; |
662 | int n, nodeRoot; |
    DEBUGLOG(5, "HUF_buildTree (alphabet size = %u)", maxSymbolValue + 1);
664 | /* init for parents */ |
665 | nonNullRank = (int)maxSymbolValue; |
666 | while(huffNode[nonNullRank].count == 0) nonNullRank--; |
667 | lowS = nonNullRank; nodeRoot = nodeNb + lowS - 1; lowN = nodeNb; |
668 | huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count; |
669 | huffNode[lowS].parent = huffNode[lowS-1].parent = (U16)nodeNb; |
670 | nodeNb++; lowS-=2; |
671 | for (n=nodeNb; n<=nodeRoot; n++) huffNode[n].count = (U32)(1U<<30); |
672 | huffNode0[0].count = (U32)(1U<<31); /* fake entry, strong barrier */ |
673 | |
674 | /* create parents */ |
675 | while (nodeNb <= nodeRoot) { |
676 | int const n1 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; |
677 | int const n2 = (huffNode[lowS].count < huffNode[lowN].count) ? lowS-- : lowN++; |
678 | huffNode[nodeNb].count = huffNode[n1].count + huffNode[n2].count; |
679 | huffNode[n1].parent = huffNode[n2].parent = (U16)nodeNb; |
680 | nodeNb++; |
681 | } |
682 | |
683 | /* distribute weights (unlimited tree height) */ |
684 | huffNode[nodeRoot].nbBits = 0; |
685 | for (n=nodeRoot-1; n>=STARTNODE; n--) |
686 | huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; |
687 | for (n=0; n<=nonNullRank; n++) |
688 | huffNode[n].nbBits = huffNode[ huffNode[n].parent ].nbBits + 1; |
689 | |
    DEBUGLOG(6, "Initial distribution of bits completed (%zu sorted symbols)", showHNodeBits(huffNode, maxSymbolValue+1));
691 | |
692 | return nonNullRank; |
693 | } |
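
/* Tiny illustrative example (not from the library): with three symbols whose
 * sorted counts are {5, 3, 2}, HUF_buildTree first pairs the two rarest
 * (3 + 2 = 5), then pairs that internal node with the remaining symbol
 * (5 + 5 = 10, the root). Walking parents back down gives nbBits = 1 for the
 * most frequent symbol and nbBits = 2 for the other two, i.e. an average
 * code length of (5*1 + 3*2 + 2*2) / 10 = 1.5 bits per symbol. */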
694 | |
695 | /** |
696 | * HUF_buildCTableFromTree(): |
697 | * Build the CTable given the Huffman tree in huffNode. |
698 | * |
699 | * @param[out] CTable The output Huffman CTable. |
700 | * @param huffNode The Huffman tree. |
701 | * @param nonNullRank The last and smallest node in the Huffman tree. |
702 | * @param maxSymbolValue The maximum symbol value. |
703 | * @param maxNbBits The exact maximum number of bits used in the Huffman tree. |
704 | */ |
705 | static void HUF_buildCTableFromTree(HUF_CElt* CTable, nodeElt const* huffNode, int nonNullRank, U32 maxSymbolValue, U32 maxNbBits) |
706 | { |
707 | HUF_CElt* const ct = CTable + 1; |
708 | /* fill result into ctable (val, nbBits) */ |
709 | int n; |
710 | U16 nbPerRank[HUF_TABLELOG_MAX+1] = {0}; |
711 | U16 valPerRank[HUF_TABLELOG_MAX+1] = {0}; |
712 | int const alphabetSize = (int)(maxSymbolValue + 1); |
713 | for (n=0; n<=nonNullRank; n++) |
714 | nbPerRank[huffNode[n].nbBits]++; |
715 | /* determine starting value per rank */ |
716 | { U16 min = 0; |
717 | for (n=(int)maxNbBits; n>0; n--) { |
718 | valPerRank[n] = min; /* get starting value within each rank */ |
719 | min += nbPerRank[n]; |
720 | min >>= 1; |
721 | } } |
722 | for (n=0; n<alphabetSize; n++) |
723 | HUF_setNbBits(ct + huffNode[n].byte, huffNode[n].nbBits); /* push nbBits per symbol, symbol order */ |
724 | for (n=0; n<alphabetSize; n++) |
725 | HUF_setValue(ct + n, valPerRank[HUF_getNbBits(ct[n])]++); /* assign value within rank, symbol order */ |
726 | CTable[0] = maxNbBits; |
727 | } |
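
/* Continuing the {5, 3, 2} example above (illustrative only): with
 * maxNbBits == 2, nbPerRank[1] == 1 and nbPerRank[2] == 2. The loop above
 * yields valPerRank[2] = 0 and valPerRank[1] = 1, so, assigning values in
 * symbol order, the 1-bit symbol receives code '1' and the two 2-bit symbols
 * receive codes '00' and '01', forming a canonical prefix code. */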
728 | |
729 | size_t |
730 | HUF_buildCTable_wksp(HUF_CElt* CTable, const unsigned* count, U32 maxSymbolValue, U32 maxNbBits, |
731 | void* workSpace, size_t wkspSize) |
732 | { |
733 | HUF_buildCTable_wksp_tables* const wksp_tables = |
734 | (HUF_buildCTable_wksp_tables*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(U32)); |
735 | nodeElt* const huffNode0 = wksp_tables->huffNodeTbl; |
736 | nodeElt* const huffNode = huffNode0+1; |
737 | int nonNullRank; |
738 | |
739 | HUF_STATIC_ASSERT(HUF_CTABLE_WORKSPACE_SIZE == sizeof(HUF_buildCTable_wksp_tables)); |
740 | |
    DEBUGLOG(5, "HUF_buildCTable_wksp (alphabet size = %u)", maxSymbolValue+1);
742 | |
743 | /* safety checks */ |
744 | if (wkspSize < sizeof(HUF_buildCTable_wksp_tables)) |
745 | return ERROR(workSpace_tooSmall); |
746 | if (maxNbBits == 0) maxNbBits = HUF_TABLELOG_DEFAULT; |
747 | if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) |
748 | return ERROR(maxSymbolValue_tooLarge); |
749 | ZSTD_memset(huffNode0, 0, sizeof(huffNodeTable)); |
750 | |
751 | /* sort, decreasing order */ |
752 | HUF_sort(huffNode, count, maxSymbolValue, wksp_tables->rankPosition); |
    DEBUGLOG(6, "sorted symbols completed (%zu symbols)", showHNodeSymbols(huffNode, maxSymbolValue+1));
754 | |
755 | /* build tree */ |
756 | nonNullRank = HUF_buildTree(huffNode, maxSymbolValue); |
757 | |
758 | /* determine and enforce maxTableLog */ |
759 | maxNbBits = HUF_setMaxHeight(huffNode, (U32)nonNullRank, maxNbBits); |
760 | if (maxNbBits > HUF_TABLELOG_MAX) return ERROR(GENERIC); /* check fit into table */ |
761 | |
762 | HUF_buildCTableFromTree(CTable, huffNode, nonNullRank, maxSymbolValue, maxNbBits); |
763 | |
764 | return maxNbBits; |
765 | } |
766 | |
767 | size_t HUF_estimateCompressedSize(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) |
768 | { |
769 | HUF_CElt const* ct = CTable + 1; |
770 | size_t nbBits = 0; |
771 | int s; |
772 | for (s = 0; s <= (int)maxSymbolValue; ++s) { |
773 | nbBits += HUF_getNbBits(ct[s]) * count[s]; |
774 | } |
775 | return nbBits >> 3; |
776 | } |
777 | |
778 | int HUF_validateCTable(const HUF_CElt* CTable, const unsigned* count, unsigned maxSymbolValue) { |
779 | HUF_CElt const* ct = CTable + 1; |
780 | int bad = 0; |
781 | int s; |
782 | for (s = 0; s <= (int)maxSymbolValue; ++s) { |
783 | bad |= (count[s] != 0) & (HUF_getNbBits(ct[s]) == 0); |
784 | } |
785 | return !bad; |
786 | } |
787 | |
788 | size_t HUF_compressBound(size_t size) { return HUF_COMPRESSBOUND(size); } |
789 | |
790 | /** HUF_CStream_t: |
791 | * Huffman uses its own BIT_CStream_t implementation. |
792 | * There are three major differences from BIT_CStream_t: |
793 | * 1. HUF_addBits() takes a HUF_CElt (size_t) which is |
 *      the pair (nbBits, value) in the following format:
796 | * - Bits [0, 4) = nbBits |
797 | * - Bits [4, 64 - nbBits) = 0 |
798 | * - Bits [64 - nbBits, 64) = value |
799 | * 2. The bitContainer is built from the upper bits and |
800 | * right shifted. E.g. to add a new value of N bits |
801 | * you right shift the bitContainer by N, then or in |
802 | * the new value into the N upper bits. |
803 | * 3. The bitstream has two bit containers. You can add |
804 | * bits to the second container and merge them into |
805 | * the first container. |
806 | */ |
807 | |
808 | #define HUF_BITS_IN_CONTAINER (sizeof(size_t) * 8) |
809 | |
810 | typedef struct { |
811 | size_t bitContainer[2]; |
812 | size_t bitPos[2]; |
813 | |
814 | BYTE* startPtr; |
815 | BYTE* ptr; |
816 | BYTE* endPtr; |
817 | } HUF_CStream_t; |
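
/* Illustrative trace (not part of the library) of the right-shift packing
 * described above, assuming a 64-bit bitContainer:
 *
 *     start:                                bitContainer = 0, bitPos = 0
 *     HUF_addBits(nbBits=3, value=0b101):
 *         bitContainer >>= 3; bitContainer |= 0b101 << 61;   bitPos = 3
 *     HUF_addBits(nbBits=2, value=0b01):
 *         bitContainer >>= 2; bitContainer |= 0b01  << 62;   bitPos = 5
 *
 * The newest symbol always occupies the topmost bits; bitPos counts how many
 * of the top bits are valid, HUF_flushBits() commits the full bytes among
 * them and keeps the remaining bitPos & 7 bits for the next flush. */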
818 | |
819 | /**! HUF_initCStream(): |
820 | * Initializes the bitstream. |
821 | * @returns 0 or an error code. |
822 | */ |
823 | static size_t HUF_initCStream(HUF_CStream_t* bitC, |
824 | void* startPtr, size_t dstCapacity) |
825 | { |
826 | ZSTD_memset(bitC, 0, sizeof(*bitC)); |
827 | bitC->startPtr = (BYTE*)startPtr; |
828 | bitC->ptr = bitC->startPtr; |
829 | bitC->endPtr = bitC->startPtr + dstCapacity - sizeof(bitC->bitContainer[0]); |
830 | if (dstCapacity <= sizeof(bitC->bitContainer[0])) return ERROR(dstSize_tooSmall); |
831 | return 0; |
832 | } |
833 | |
834 | /*! HUF_addBits(): |
835 | * Adds the symbol stored in HUF_CElt elt to the bitstream. |
836 | * |
837 | * @param elt The element we're adding. This is a (nbBits, value) pair. |
838 | * See the HUF_CStream_t docs for the format. |
839 | * @param idx Insert into the bitstream at this idx. |
840 | * @param kFast This is a template parameter. If the bitstream is guaranteed |
841 | * to have at least 4 unused bits after this call it may be 1, |
842 | * otherwise it must be 0. HUF_addBits() is faster when fast is set. |
843 | */ |
844 | FORCE_INLINE_TEMPLATE void HUF_addBits(HUF_CStream_t* bitC, HUF_CElt elt, int idx, int kFast) |
845 | { |
846 | assert(idx <= 1); |
847 | assert(HUF_getNbBits(elt) <= HUF_TABLELOG_ABSOLUTEMAX); |
848 | /* This is efficient on x86-64 with BMI2 because shrx |
849 | * only reads the low 6 bits of the register. The compiler |
850 | * knows this and elides the mask. When fast is set, |
851 | * every operation can use the same value loaded from elt. |
852 | */ |
853 | bitC->bitContainer[idx] >>= HUF_getNbBits(elt); |
854 | bitC->bitContainer[idx] |= kFast ? HUF_getValueFast(elt) : HUF_getValue(elt); |
855 | /* We only read the low 8 bits of bitC->bitPos[idx] so it |
856 | * doesn't matter that the high bits have noise from the value. |
857 | */ |
858 | bitC->bitPos[idx] += HUF_getNbBitsFast(elt); |
859 | assert((bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); |
860 | /* The last 4-bits of elt are dirty if fast is set, |
861 | * so we must not be overwriting bits that have already been |
862 | * inserted into the bit container. |
863 | */ |
864 | #if DEBUGLEVEL >= 1 |
865 | { |
866 | size_t const nbBits = HUF_getNbBits(elt); |
867 | size_t const dirtyBits = nbBits == 0 ? 0 : ZSTD_highbit32((U32)nbBits) + 1; |
868 | (void)dirtyBits; |
869 | /* Middle bits are 0. */ |
870 | assert(((elt >> dirtyBits) << (dirtyBits + nbBits)) == 0); |
871 | /* We didn't overwrite any bits in the bit container. */ |
872 | assert(!kFast || (bitC->bitPos[idx] & 0xFF) <= HUF_BITS_IN_CONTAINER); |
874 | } |
875 | #endif |
876 | } |
877 | |
878 | FORCE_INLINE_TEMPLATE void HUF_zeroIndex1(HUF_CStream_t* bitC) |
879 | { |
880 | bitC->bitContainer[1] = 0; |
881 | bitC->bitPos[1] = 0; |
882 | } |
883 | |
884 | /*! HUF_mergeIndex1() : |
885 | * Merges the bit container @ index 1 into the bit container @ index 0 |
886 | * and zeros the bit container @ index 1. |
887 | */ |
888 | FORCE_INLINE_TEMPLATE void HUF_mergeIndex1(HUF_CStream_t* bitC) |
889 | { |
890 | assert((bitC->bitPos[1] & 0xFF) < HUF_BITS_IN_CONTAINER); |
891 | bitC->bitContainer[0] >>= (bitC->bitPos[1] & 0xFF); |
892 | bitC->bitContainer[0] |= bitC->bitContainer[1]; |
893 | bitC->bitPos[0] += bitC->bitPos[1]; |
894 | assert((bitC->bitPos[0] & 0xFF) <= HUF_BITS_IN_CONTAINER); |
895 | } |
896 | |
897 | /*! HUF_flushBits() : |
898 | * Flushes the bits in the bit container @ index 0. |
899 | * |
900 | * @post bitPos will be < 8. |
901 | * @param kFast If kFast is set then we must know a-priori that |
902 | * the bit container will not overflow. |
903 | */ |
904 | FORCE_INLINE_TEMPLATE void HUF_flushBits(HUF_CStream_t* bitC, int kFast) |
905 | { |
906 | /* The upper bits of bitPos are noisy, so we must mask by 0xFF. */ |
907 | size_t const nbBits = bitC->bitPos[0] & 0xFF; |
908 | size_t const nbBytes = nbBits >> 3; |
909 | /* The top nbBits bits of bitContainer are the ones we need. */ |
910 | size_t const bitContainer = bitC->bitContainer[0] >> (HUF_BITS_IN_CONTAINER - nbBits); |
911 | /* Mask bitPos to account for the bytes we consumed. */ |
912 | bitC->bitPos[0] &= 7; |
913 | assert(nbBits > 0); |
914 | assert(nbBits <= sizeof(bitC->bitContainer[0]) * 8); |
915 | assert(bitC->ptr <= bitC->endPtr); |
916 | MEM_writeLEST(bitC->ptr, bitContainer); |
917 | bitC->ptr += nbBytes; |
918 | assert(!kFast || bitC->ptr <= bitC->endPtr); |
919 | if (!kFast && bitC->ptr > bitC->endPtr) bitC->ptr = bitC->endPtr; |
920 | /* bitContainer doesn't need to be modified because the leftover |
921 | * bits are already the top bitPos bits. And we don't care about |
922 | * noise in the lower values. |
923 | */ |
924 | } |
925 | |
926 | /*! HUF_endMark() |
927 | * @returns The Huffman stream end mark: A 1-bit value = 1. |
928 | */ |
929 | static HUF_CElt HUF_endMark(void) |
930 | { |
931 | HUF_CElt endMark; |
932 | HUF_setNbBits(&endMark, 1); |
933 | HUF_setValue(&endMark, 1); |
934 | return endMark; |
935 | } |
936 | |
937 | /*! HUF_closeCStream() : |
938 | * @return Size of CStream, in bytes, |
939 | * or 0 if it could not fit into dstBuffer */ |
940 | static size_t HUF_closeCStream(HUF_CStream_t* bitC) |
941 | { |
942 | HUF_addBits(bitC, HUF_endMark(), /* idx */ 0, /* kFast */ 0); |
943 | HUF_flushBits(bitC, /* kFast */ 0); |
944 | { |
945 | size_t const nbBits = bitC->bitPos[0] & 0xFF; |
946 | if (bitC->ptr >= bitC->endPtr) return 0; /* overflow detected */ |
947 | return (size_t)(bitC->ptr - bitC->startPtr) + (nbBits > 0); |
948 | } |
949 | } |
950 | |
951 | FORCE_INLINE_TEMPLATE void |
952 | HUF_encodeSymbol(HUF_CStream_t* bitCPtr, U32 symbol, const HUF_CElt* CTable, int idx, int fast) |
953 | { |
954 | HUF_addBits(bitCPtr, CTable[symbol], idx, fast); |
955 | } |
956 | |
957 | FORCE_INLINE_TEMPLATE void |
958 | HUF_compress1X_usingCTable_internal_body_loop(HUF_CStream_t* bitC, |
959 | const BYTE* ip, size_t srcSize, |
960 | const HUF_CElt* ct, |
961 | int kUnroll, int kFastFlush, int kLastFast) |
962 | { |
963 | /* Join to kUnroll */ |
964 | int n = (int)srcSize; |
965 | int rem = n % kUnroll; |
966 | if (rem > 0) { |
967 | for (; rem > 0; --rem) { |
968 | HUF_encodeSymbol(bitC, ip[--n], ct, 0, /* fast */ 0); |
969 | } |
970 | HUF_flushBits(bitC, kFastFlush); |
971 | } |
972 | assert(n % kUnroll == 0); |
973 | |
974 | /* Join to 2 * kUnroll */ |
975 | if (n % (2 * kUnroll)) { |
976 | int u; |
977 | for (u = 1; u < kUnroll; ++u) { |
978 | HUF_encodeSymbol(bitC, ip[n - u], ct, 0, 1); |
979 | } |
980 | HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, 0, kLastFast); |
981 | HUF_flushBits(bitC, kFastFlush); |
982 | n -= kUnroll; |
983 | } |
984 | assert(n % (2 * kUnroll) == 0); |
985 | |
986 | for (; n>0; n-= 2 * kUnroll) { |
987 | /* Encode kUnroll symbols into the bitstream @ index 0. */ |
988 | int u; |
989 | for (u = 1; u < kUnroll; ++u) { |
990 | HUF_encodeSymbol(bitC, ip[n - u], ct, /* idx */ 0, /* fast */ 1); |
991 | } |
992 | HUF_encodeSymbol(bitC, ip[n - kUnroll], ct, /* idx */ 0, /* fast */ kLastFast); |
993 | HUF_flushBits(bitC, kFastFlush); |
994 | /* Encode kUnroll symbols into the bitstream @ index 1. |
995 | * This allows us to start filling the bit container |
996 | * without any data dependencies. |
997 | */ |
998 | HUF_zeroIndex1(bitC); |
999 | for (u = 1; u < kUnroll; ++u) { |
1000 | HUF_encodeSymbol(bitC, ip[n - kUnroll - u], ct, /* idx */ 1, /* fast */ 1); |
1001 | } |
1002 | HUF_encodeSymbol(bitC, ip[n - kUnroll - kUnroll], ct, /* idx */ 1, /* fast */ kLastFast); |
1003 | /* Merge bitstream @ index 1 into the bitstream @ index 0 */ |
1004 | HUF_mergeIndex1(bitC); |
1005 | HUF_flushBits(bitC, kFastFlush); |
1006 | } |
1007 | assert(n == 0); |
1008 | |
1009 | } |
1010 | |
1011 | /** |
 * Returns a tight upper bound on the output space needed by Huffman,
 * with an 8-byte buffer to handle over-writes. If the output is at least
1014 | * this large we don't need to do bounds checks during Huffman encoding. |
1015 | */ |
1016 | static size_t HUF_tightCompressBound(size_t srcSize, size_t tableLog) |
1017 | { |
1018 | return ((srcSize * tableLog) >> 3) + 8; |
1019 | } |
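
/* Quick illustrative check (not from the library): for srcSize == 1000 and
 * tableLog == 11 the bound is ((1000 * 11) >> 3) + 8 == 1375 + 8 == 1383
 * bytes, i.e. every symbol is charged the worst case of tableLog bits, plus
 * 8 spare bytes so flushing can over-write safely without bounds checks. */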
1020 | |
1021 | |
1022 | FORCE_INLINE_TEMPLATE size_t |
1023 | HUF_compress1X_usingCTable_internal_body(void* dst, size_t dstSize, |
1024 | const void* src, size_t srcSize, |
1025 | const HUF_CElt* CTable) |
1026 | { |
1027 | U32 const tableLog = (U32)CTable[0]; |
1028 | HUF_CElt const* ct = CTable + 1; |
1029 | const BYTE* ip = (const BYTE*) src; |
1030 | BYTE* const ostart = (BYTE*)dst; |
1031 | BYTE* const oend = ostart + dstSize; |
1032 | BYTE* op = ostart; |
1033 | HUF_CStream_t bitC; |
1034 | |
1035 | /* init */ |
1036 | if (dstSize < 8) return 0; /* not enough space to compress */ |
1037 | { size_t const initErr = HUF_initCStream(&bitC, op, (size_t)(oend-op)); |
1038 | if (HUF_isError(initErr)) return 0; } |
1039 | |
1040 | if (dstSize < HUF_tightCompressBound(srcSize, (size_t)tableLog) || tableLog > 11) |
1041 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ MEM_32bits() ? 2 : 4, /* kFast */ 0, /* kLastFast */ 0); |
1042 | else { |
1043 | if (MEM_32bits()) { |
1044 | switch (tableLog) { |
1045 | case 11: |
1046 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 0); |
1047 | break; |
1048 | case 10: ZSTD_FALLTHROUGH; |
1049 | case 9: ZSTD_FALLTHROUGH; |
1050 | case 8: |
1051 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 2, /* kFastFlush */ 1, /* kLastFast */ 1); |
1052 | break; |
1053 | case 7: ZSTD_FALLTHROUGH; |
1054 | default: |
1055 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 3, /* kFastFlush */ 1, /* kLastFast */ 1); |
1056 | break; |
1057 | } |
1058 | } else { |
1059 | switch (tableLog) { |
1060 | case 11: |
1061 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 0); |
1062 | break; |
1063 | case 10: |
1064 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 5, /* kFastFlush */ 1, /* kLastFast */ 1); |
1065 | break; |
1066 | case 9: |
1067 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 6, /* kFastFlush */ 1, /* kLastFast */ 0); |
1068 | break; |
1069 | case 8: |
1070 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 7, /* kFastFlush */ 1, /* kLastFast */ 0); |
1071 | break; |
1072 | case 7: |
1073 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 8, /* kFastFlush */ 1, /* kLastFast */ 0); |
1074 | break; |
1075 | case 6: ZSTD_FALLTHROUGH; |
1076 | default: |
1077 | HUF_compress1X_usingCTable_internal_body_loop(&bitC, ip, srcSize, ct, /* kUnroll */ 9, /* kFastFlush */ 1, /* kLastFast */ 1); |
1078 | break; |
1079 | } |
1080 | } |
1081 | } |
1082 | assert(bitC.ptr <= bitC.endPtr); |
1083 | |
1084 | return HUF_closeCStream(&bitC); |
1085 | } |
1086 | |
1087 | #if DYNAMIC_BMI2 |
1088 | |
1089 | static BMI2_TARGET_ATTRIBUTE size_t |
1090 | HUF_compress1X_usingCTable_internal_bmi2(void* dst, size_t dstSize, |
1091 | const void* src, size_t srcSize, |
1092 | const HUF_CElt* CTable) |
1093 | { |
1094 | return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); |
1095 | } |
1096 | |
1097 | static size_t |
1098 | HUF_compress1X_usingCTable_internal_default(void* dst, size_t dstSize, |
1099 | const void* src, size_t srcSize, |
1100 | const HUF_CElt* CTable) |
1101 | { |
1102 | return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); |
1103 | } |
1104 | |
1105 | static size_t |
1106 | HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, |
1107 | const void* src, size_t srcSize, |
1108 | const HUF_CElt* CTable, const int flags) |
1109 | { |
1110 | if (flags & HUF_flags_bmi2) { |
1111 | return HUF_compress1X_usingCTable_internal_bmi2(dst, dstSize, src, srcSize, CTable); |
1112 | } |
1113 | return HUF_compress1X_usingCTable_internal_default(dst, dstSize, src, srcSize, CTable); |
1114 | } |
1115 | |
1116 | #else |
1117 | |
1118 | static size_t |
1119 | HUF_compress1X_usingCTable_internal(void* dst, size_t dstSize, |
1120 | const void* src, size_t srcSize, |
1121 | const HUF_CElt* CTable, const int flags) |
1122 | { |
1123 | (void)flags; |
1124 | return HUF_compress1X_usingCTable_internal_body(dst, dstSize, src, srcSize, CTable); |
1125 | } |
1126 | |
1127 | #endif |
1128 | |
1129 | size_t HUF_compress1X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) |
1130 | { |
1131 | return HUF_compress1X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); |
1132 | } |
1133 | |
1134 | static size_t |
1135 | HUF_compress4X_usingCTable_internal(void* dst, size_t dstSize, |
1136 | const void* src, size_t srcSize, |
1137 | const HUF_CElt* CTable, int flags) |
1138 | { |
1139 | size_t const segmentSize = (srcSize+3)/4; /* first 3 segments */ |
1140 | const BYTE* ip = (const BYTE*) src; |
1141 | const BYTE* const iend = ip + srcSize; |
1142 | BYTE* const ostart = (BYTE*) dst; |
1143 | BYTE* const oend = ostart + dstSize; |
1144 | BYTE* op = ostart; |
1145 | |
1146 | if (dstSize < 6 + 1 + 1 + 1 + 8) return 0; /* minimum space to compress successfully */ |
1147 | if (srcSize < 12) return 0; /* no saving possible : too small input */ |
1148 | op += 6; /* jumpTable */ |
1149 | |
1150 | assert(op <= oend); |
1151 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); |
1152 | if (cSize == 0 || cSize > 65535) return 0; |
1153 | MEM_writeLE16(ostart, (U16)cSize); |
1154 | op += cSize; |
1155 | } |
1156 | |
1157 | ip += segmentSize; |
1158 | assert(op <= oend); |
1159 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); |
1160 | if (cSize == 0 || cSize > 65535) return 0; |
1161 | MEM_writeLE16(ostart+2, (U16)cSize); |
1162 | op += cSize; |
1163 | } |
1164 | |
1165 | ip += segmentSize; |
1166 | assert(op <= oend); |
1167 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, segmentSize, CTable, flags) ); |
1168 | if (cSize == 0 || cSize > 65535) return 0; |
1169 | MEM_writeLE16(ostart+4, (U16)cSize); |
1170 | op += cSize; |
1171 | } |
1172 | |
1173 | ip += segmentSize; |
1174 | assert(op <= oend); |
1175 | assert(ip <= iend); |
1176 | { CHECK_V_F(cSize, HUF_compress1X_usingCTable_internal(op, (size_t)(oend-op), ip, (size_t)(iend-ip), CTable, flags) ); |
1177 | if (cSize == 0 || cSize > 65535) return 0; |
1178 | op += cSize; |
1179 | } |
1180 | |
1181 | return (size_t)(op-ostart); |
1182 | } |
1183 | |
1184 | size_t HUF_compress4X_usingCTable(void* dst, size_t dstSize, const void* src, size_t srcSize, const HUF_CElt* CTable, int flags) |
1185 | { |
1186 | return HUF_compress4X_usingCTable_internal(dst, dstSize, src, srcSize, CTable, flags); |
1187 | } |
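
/* Illustrative layout (not from the library) of the 4-stream format produced
 * above: a 6-byte jump table holding the little-endian 16-bit compressed
 * sizes of the first three streams, followed by the four streams themselves.
 * The size of the fourth stream is implied by the total block size.
 *
 *   | size1 (2B LE) | size2 (2B LE) | size3 (2B LE) | stream1 | stream2 | stream3 | stream4 |
 */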
1188 | |
1189 | typedef enum { HUF_singleStream, HUF_fourStreams } HUF_nbStreams_e; |
1190 | |
1191 | static size_t HUF_compressCTable_internal( |
1192 | BYTE* const ostart, BYTE* op, BYTE* const oend, |
1193 | const void* src, size_t srcSize, |
1194 | HUF_nbStreams_e nbStreams, const HUF_CElt* CTable, const int flags) |
1195 | { |
1196 | size_t const cSize = (nbStreams==HUF_singleStream) ? |
1197 | HUF_compress1X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags) : |
1198 | HUF_compress4X_usingCTable_internal(op, (size_t)(oend - op), src, srcSize, CTable, flags); |
1199 | if (HUF_isError(cSize)) { return cSize; } |
1200 | if (cSize==0) { return 0; } /* uncompressible */ |
1201 | op += cSize; |
1202 | /* check compressibility */ |
1203 | assert(op >= ostart); |
1204 | if ((size_t)(op-ostart) >= srcSize-1) { return 0; } |
1205 | return (size_t)(op-ostart); |
1206 | } |
1207 | |
1208 | typedef struct { |
1209 | unsigned count[HUF_SYMBOLVALUE_MAX + 1]; |
1210 | HUF_CElt CTable[HUF_CTABLE_SIZE_ST(HUF_SYMBOLVALUE_MAX)]; |
1211 | union { |
1212 | HUF_buildCTable_wksp_tables buildCTable_wksp; |
1213 | HUF_WriteCTableWksp writeCTable_wksp; |
1214 | U32 hist_wksp[HIST_WKSP_SIZE_U32]; |
1215 | } wksps; |
1216 | } HUF_compress_tables_t; |
1217 | |
1218 | #define SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE 4096 |
1219 | #define SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO 10 /* Must be >= 2 */ |
1220 | |
1221 | unsigned HUF_cardinality(const unsigned* count, unsigned maxSymbolValue) |
1222 | { |
1223 | unsigned cardinality = 0; |
1224 | unsigned i; |
1225 | |
1226 | for (i = 0; i < maxSymbolValue + 1; i++) { |
1227 | if (count[i] != 0) cardinality += 1; |
1228 | } |
1229 | |
1230 | return cardinality; |
1231 | } |
1232 | |
1233 | unsigned HUF_minTableLog(unsigned symbolCardinality) |
1234 | { |
1235 | U32 minBitsSymbols = ZSTD_highbit32(symbolCardinality) + 1; |
1236 | return minBitsSymbols; |
1237 | } |
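
/* Illustrative only: with a symbol cardinality of 50, ZSTD_highbit32(50) == 5,
 * so HUF_minTableLog() returns 6; any Huffman table over 50 distinct symbols
 * needs a depth of at least 6 bits. */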
1238 | |
1239 | unsigned HUF_optimalTableLog( |
1240 | unsigned maxTableLog, |
1241 | size_t srcSize, |
1242 | unsigned maxSymbolValue, |
1243 | void* workSpace, size_t wkspSize, |
1244 | HUF_CElt* table, |
1245 | const unsigned* count, |
1246 | int flags) |
1247 | { |
1248 | assert(srcSize > 1); /* Not supported, RLE should be used instead */ |
1249 | assert(wkspSize >= sizeof(HUF_buildCTable_wksp_tables)); |
1250 | |
1251 | if (!(flags & HUF_flags_optimalDepth)) { |
1252 | /* cheap evaluation, based on FSE */ |
1253 | return FSE_optimalTableLog_internal(maxTableLog, srcSize, maxSymbolValue, 1); |
1254 | } |
1255 | |
1256 | { BYTE* dst = (BYTE*)workSpace + sizeof(HUF_WriteCTableWksp); |
1257 | size_t dstSize = wkspSize - sizeof(HUF_WriteCTableWksp); |
1258 | size_t maxBits, hSize, newSize; |
1259 | const unsigned symbolCardinality = HUF_cardinality(count, maxSymbolValue); |
1260 | const unsigned minTableLog = HUF_minTableLog(symbolCardinality); |
1261 | size_t optSize = ((size_t) ~0) - 1; |
1262 | unsigned optLog = maxTableLog, optLogGuess; |
1263 | |
        DEBUGLOG(6, "HUF_optimalTableLog: probing huf depth (srcSize=%zu)", srcSize);
1265 | |
1266 | /* Search until size increases */ |
1267 | for (optLogGuess = minTableLog; optLogGuess <= maxTableLog; optLogGuess++) { |
            DEBUGLOG(7, "checking for huffLog=%u", optLogGuess);
1269 | maxBits = HUF_buildCTable_wksp(table, count, maxSymbolValue, optLogGuess, workSpace, wkspSize); |
1270 | if (ERR_isError(maxBits)) continue; |
1271 | |
1272 | if (maxBits < optLogGuess && optLogGuess > minTableLog) break; |
1273 | |
1274 | hSize = HUF_writeCTable_wksp(dst, dstSize, table, maxSymbolValue, (U32)maxBits, workSpace, wkspSize); |
1275 | |
1276 | if (ERR_isError(hSize)) continue; |
1277 | |
1278 | newSize = HUF_estimateCompressedSize(table, count, maxSymbolValue) + hSize; |
1279 | |
1280 | if (newSize > optSize + 1) { |
1281 | break; |
1282 | } |
1283 | |
1284 | if (newSize < optSize) { |
1285 | optSize = newSize; |
1286 | optLog = optLogGuess; |
1287 | } |
1288 | } |
1289 | assert(optLog <= HUF_TABLELOG_MAX); |
1290 | return optLog; |
1291 | } |
1292 | } |
1293 | |
1294 | /* HUF_compress_internal() : |
1295 | * `workSpace_align4` must be aligned on 4-bytes boundaries, |
1296 | * and occupies the same space as a table of HUF_WORKSPACE_SIZE_U64 unsigned */ |
1297 | static size_t |
1298 | HUF_compress_internal (void* dst, size_t dstSize, |
1299 | const void* src, size_t srcSize, |
1300 | unsigned maxSymbolValue, unsigned huffLog, |
1301 | HUF_nbStreams_e nbStreams, |
1302 | void* workSpace, size_t wkspSize, |
1303 | HUF_CElt* oldHufTable, HUF_repeat* repeat, int flags) |
1304 | { |
1305 | HUF_compress_tables_t* const table = (HUF_compress_tables_t*)HUF_alignUpWorkspace(workSpace, &wkspSize, ZSTD_ALIGNOF(size_t)); |
1306 | BYTE* const ostart = (BYTE*)dst; |
1307 | BYTE* const oend = ostart + dstSize; |
1308 | BYTE* op = ostart; |
1309 | |
    DEBUGLOG(5, "HUF_compress_internal (srcSize=%zu)", srcSize);
1311 | HUF_STATIC_ASSERT(sizeof(*table) + HUF_WORKSPACE_MAX_ALIGNMENT <= HUF_WORKSPACE_SIZE); |
1312 | |
1313 | /* checks & inits */ |
1314 | if (wkspSize < sizeof(*table)) return ERROR(workSpace_tooSmall); |
1315 | if (!srcSize) return 0; /* Uncompressed */ |
1316 | if (!dstSize) return 0; /* cannot fit anything within dst budget */ |
1317 | if (srcSize > HUF_BLOCKSIZE_MAX) return ERROR(srcSize_wrong); /* current block size limit */ |
1318 | if (huffLog > HUF_TABLELOG_MAX) return ERROR(tableLog_tooLarge); |
1319 | if (maxSymbolValue > HUF_SYMBOLVALUE_MAX) return ERROR(maxSymbolValue_tooLarge); |
1320 | if (!maxSymbolValue) maxSymbolValue = HUF_SYMBOLVALUE_MAX; |
1321 | if (!huffLog) huffLog = HUF_TABLELOG_DEFAULT; |
1322 | |
1323 | /* Heuristic : If old table is valid, use it for small inputs */ |
1324 | if ((flags & HUF_flags_preferRepeat) && repeat && *repeat == HUF_repeat_valid) { |
1325 | return HUF_compressCTable_internal(ostart, op, oend, |
1326 | src, srcSize, |
1327 | nbStreams, oldHufTable, flags); |
1328 | } |
1329 | |
1330 | /* If uncompressible data is suspected, do a smaller sampling first */ |
1331 | DEBUG_STATIC_ASSERT(SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO >= 2); |
1332 | if ((flags & HUF_flags_suspectUncompressible) && srcSize >= (SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE * SUSPECT_INCOMPRESSIBLE_SAMPLE_RATIO)) { |
1333 | size_t largestTotal = 0; |
        DEBUGLOG(5, "input suspected incompressible : sampling to check");
1335 | { unsigned maxSymbolValueBegin = maxSymbolValue; |
1336 | CHECK_V_F(largestBegin, HIST_count_simple (table->count, &maxSymbolValueBegin, (const BYTE*)src, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); |
1337 | largestTotal += largestBegin; |
1338 | } |
1339 | { unsigned maxSymbolValueEnd = maxSymbolValue; |
1340 | CHECK_V_F(largestEnd, HIST_count_simple (table->count, &maxSymbolValueEnd, (const BYTE*)src + srcSize - SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE, SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) ); |
1341 | largestTotal += largestEnd; |
1342 | } |
1343 | if (largestTotal <= ((2 * SUSPECT_INCOMPRESSIBLE_SAMPLE_SIZE) >> 7)+4) return 0; /* heuristic : probably not compressible enough */ |
1344 | } |
1345 | |
1346 | /* Scan input and build symbol stats */ |
1347 | { CHECK_V_F(largest, HIST_count_wksp (table->count, &maxSymbolValue, (const BYTE*)src, srcSize, table->wksps.hist_wksp, sizeof(table->wksps.hist_wksp)) ); |
1348 | if (largest == srcSize) { *ostart = ((const BYTE*)src)[0]; return 1; } /* single symbol, rle */ |
1349 | if (largest <= (srcSize >> 7)+4) return 0; /* heuristic : probably not compressible enough */ |
1350 | } |
    DEBUGLOG(6, "histogram detail completed (%zu symbols)", showU32(table->count, maxSymbolValue+1));
1352 | |
1353 | /* Check validity of previous table */ |
1354 | if ( repeat |
1355 | && *repeat == HUF_repeat_check |
1356 | && !HUF_validateCTable(oldHufTable, table->count, maxSymbolValue)) { |
1357 | *repeat = HUF_repeat_none; |
1358 | } |
1359 | /* Heuristic : use existing table for small inputs */ |
1360 | if ((flags & HUF_flags_preferRepeat) && repeat && *repeat != HUF_repeat_none) { |
1361 | return HUF_compressCTable_internal(ostart, op, oend, |
1362 | src, srcSize, |
1363 | nbStreams, oldHufTable, flags); |
1364 | } |
1365 | |
1366 | /* Build Huffman Tree */ |
1367 | huffLog = HUF_optimalTableLog(huffLog, srcSize, maxSymbolValue, &table->wksps, sizeof(table->wksps), table->CTable, table->count, flags); |
1368 | { size_t const maxBits = HUF_buildCTable_wksp(table->CTable, table->count, |
1369 | maxSymbolValue, huffLog, |
1370 | &table->wksps.buildCTable_wksp, sizeof(table->wksps.buildCTable_wksp)); |
1371 | CHECK_F(maxBits); |
1372 | huffLog = (U32)maxBits; |
        DEBUGLOG(6, "bit distribution completed (%zu symbols)", showCTableBits(table->CTable + 1, maxSymbolValue+1));
1374 | } |
1375 | /* Zero unused symbols in CTable, so we can check it for validity */ |
1376 | { |
1377 | size_t const ctableSize = HUF_CTABLE_SIZE_ST(maxSymbolValue); |
1378 | size_t const unusedSize = sizeof(table->CTable) - ctableSize * sizeof(HUF_CElt); |
1379 | ZSTD_memset(table->CTable + ctableSize, 0, unusedSize); |
1380 | } |
1381 | |
1382 | /* Write table description header */ |
1383 | { CHECK_V_F(hSize, HUF_writeCTable_wksp(op, dstSize, table->CTable, maxSymbolValue, huffLog, |
1384 | &table->wksps.writeCTable_wksp, sizeof(table->wksps.writeCTable_wksp)) ); |
1385 | /* Check if using previous huffman table is beneficial */ |
1386 | if (repeat && *repeat != HUF_repeat_none) { |
1387 | size_t const oldSize = HUF_estimateCompressedSize(oldHufTable, table->count, maxSymbolValue); |
1388 | size_t const newSize = HUF_estimateCompressedSize(table->CTable, table->count, maxSymbolValue); |
1389 | if (oldSize <= hSize + newSize || hSize + 12 >= srcSize) { |
1390 | return HUF_compressCTable_internal(ostart, op, oend, |
1391 | src, srcSize, |
1392 | nbStreams, oldHufTable, flags); |
1393 | } } |
1394 | |
1395 | /* Use the new huffman table */ |
1396 | if (hSize + 12ul >= srcSize) { return 0; } |
1397 | op += hSize; |
1398 | if (repeat) { *repeat = HUF_repeat_none; } |
1399 | if (oldHufTable) |
1400 | ZSTD_memcpy(oldHufTable, table->CTable, sizeof(table->CTable)); /* Save new table */ |
1401 | } |
1402 | return HUF_compressCTable_internal(ostart, op, oend, |
1403 | src, srcSize, |
1404 | nbStreams, table->CTable, flags); |
1405 | } |
1406 | |
1407 | size_t HUF_compress1X_repeat (void* dst, size_t dstSize, |
1408 | const void* src, size_t srcSize, |
1409 | unsigned maxSymbolValue, unsigned huffLog, |
1410 | void* workSpace, size_t wkspSize, |
1411 | HUF_CElt* hufTable, HUF_repeat* repeat, int flags) |
1412 | { |
    DEBUGLOG(5, "HUF_compress1X_repeat (srcSize = %zu)", srcSize);
1414 | return HUF_compress_internal(dst, dstSize, src, srcSize, |
1415 | maxSymbolValue, huffLog, HUF_singleStream, |
1416 | workSpace, wkspSize, hufTable, |
1417 | repeat, flags); |
1418 | } |
1419 | |
1420 | /* HUF_compress4X_repeat(): |
1421 | * compress input using 4 streams. |
 * consider skipping quickly when the input appears incompressible.
 * reuse an existing Huffman compression table when advantageous. */
1424 | size_t HUF_compress4X_repeat (void* dst, size_t dstSize, |
1425 | const void* src, size_t srcSize, |
1426 | unsigned maxSymbolValue, unsigned huffLog, |
1427 | void* workSpace, size_t wkspSize, |
1428 | HUF_CElt* hufTable, HUF_repeat* repeat, int flags) |
1429 | { |
    DEBUGLOG(5, "HUF_compress4X_repeat (srcSize = %zu)", srcSize);
1431 | return HUF_compress_internal(dst, dstSize, src, srcSize, |
1432 | maxSymbolValue, huffLog, HUF_fourStreams, |
1433 | workSpace, wkspSize, |
1434 | hufTable, repeat, flags); |
1435 | } |
1436 | |