1/*
2 * Copyright (c) 2016-present, Yann Collet, Facebook, Inc.
3 * All rights reserved.
4 *
5 * This source code is licensed under both the BSD-style license (found in the
6 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
7 * in the COPYING file in the root directory of this source tree).
8 * You may select, at your option, one of the above-listed licenses.
9 */
10
11
12/* ***************************************************************
13* Tuning parameters
14*****************************************************************/
/*!
 * HEAPMODE :
 * Select how the default decompression function ZSTD_decompress() allocates its context :
 * on the stack (0), or on the heap (1, default; requires malloc()).
 * Note that functions taking an explicit context, such as ZSTD_decompressDCtx(), are unaffected.
 */
21#ifndef ZSTD_HEAPMODE
22# define ZSTD_HEAPMODE 1
23#endif
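
/* Illustrative note (not part of the original sources) : whatever the value of
 * ZSTD_HEAPMODE, a caller can avoid per-call allocation by managing the context
 * explicitly, since ZSTD_decompressDCtx() is unaffected by this setting.
 * A minimal sketch, where `src`, `srcSize`, `dst` and `dstCapacity` stand for
 * caller-provided buffers :
 *
 *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *     size_t const result = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
 *     if (ZSTD_isError(result)) { ... handle error ... }
 *     ZSTD_freeDCtx(dctx);
 */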
24
25/*!
26* LEGACY_SUPPORT :
27* if set to 1+, ZSTD_decompress() can decode older formats (v0.1+)
28*/
29#ifndef ZSTD_LEGACY_SUPPORT
30# define ZSTD_LEGACY_SUPPORT 0
31#endif
32
33/*!
34 * MAXWINDOWSIZE_DEFAULT :
35 * maximum window size accepted by DStream __by default__.
36 * Frames requiring more memory will be rejected.
37 * It's possible to set a different limit using ZSTD_DCtx_setMaxWindowSize().
38 */
39#ifndef ZSTD_MAXWINDOWSIZE_DEFAULT
40# define ZSTD_MAXWINDOWSIZE_DEFAULT (((U32)1 << ZSTD_WINDOWLOG_DEFAULTMAX) + 1)
41#endif
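
/* Illustrative sketch (an assumption, not part of the original sources) : a caller
 * that must accept frames with a window larger than this default can raise the
 * limit on its own context, using the ZSTD_DCtx_setMaxWindowSize() mentioned above
 * (signature assumed from the advanced API of zstd.h). The 1 << 27 limit below is
 * only an example value chosen by the caller :
 *
 *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *     size_t const err = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);
 *     if (ZSTD_isError(err)) { ... handle error ... }
 */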
42
43
44/*-*******************************************************
45* Dependencies
46*********************************************************/
47#include <string.h> /* memcpy, memmove, memset */
48#include "cpu.h"
49#include "mem.h" /* low level memory routines */
50#define FSE_STATIC_LINKING_ONLY
51#include "fse.h"
52#define HUF_STATIC_LINKING_ONLY
53#include "huf.h"
54#include "zstd_internal.h"
55
56#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
57# include "zstd_legacy.h"
58#endif
59
60
61/*-*************************************
62* Errors
63***************************************/
64#define ZSTD_isError ERR_isError /* for inlining */
65#define FSE_isError ERR_isError
66#define HUF_isError ERR_isError
67
68
69/*_*******************************************************
70* Memory operations
71**********************************************************/
72static void ZSTD_copy4(void* dst, const void* src) { memcpy(dst, src, 4); }
73
74
75/*-*************************************************************
76* Context management
77***************************************************************/
78typedef enum { ZSTDds_getFrameHeaderSize, ZSTDds_decodeFrameHeader,
79 ZSTDds_decodeBlockHeader, ZSTDds_decompressBlock,
80 ZSTDds_decompressLastBlock, ZSTDds_checkChecksum,
81 ZSTDds_decodeSkippableHeader, ZSTDds_skipFrame } ZSTD_dStage;
82
83typedef enum { zdss_init=0, zdss_loadHeader,
84 zdss_read, zdss_load, zdss_flush } ZSTD_dStreamStage;
85
86
87typedef struct {
88 U32 fastMode;
89 U32 tableLog;
90} ZSTD_seqSymbol_header;
91
92typedef struct {
93 U16 nextState;
94 BYTE nbAdditionalBits;
95 BYTE nbBits;
96 U32 baseValue;
97} ZSTD_seqSymbol;
98
99#define SEQSYMBOL_TABLE_SIZE(log) (1 + (1 << (log)))
100
101typedef struct {
102 ZSTD_seqSymbol LLTable[SEQSYMBOL_TABLE_SIZE(LLFSELog)];
103 ZSTD_seqSymbol OFTable[SEQSYMBOL_TABLE_SIZE(OffFSELog)];
104 ZSTD_seqSymbol MLTable[SEQSYMBOL_TABLE_SIZE(MLFSELog)];
105 HUF_DTable hufTable[HUF_DTABLE_SIZE(HufLog)]; /* can accommodate HUF_decompress4X */
106 U32 workspace[HUF_DECOMPRESS_WORKSPACE_SIZE_U32];
107 U32 rep[ZSTD_REP_NUM];
108} ZSTD_entropyDTables_t;
109
110struct ZSTD_DCtx_s
111{
112 const ZSTD_seqSymbol* LLTptr;
113 const ZSTD_seqSymbol* MLTptr;
114 const ZSTD_seqSymbol* OFTptr;
115 const HUF_DTable* HUFptr;
116 ZSTD_entropyDTables_t entropy;
117 const void* previousDstEnd; /* detect continuity */
118 const void* base; /* start of current segment */
119 const void* vBase; /* virtual start of previous segment if it was just before current one */
120 const void* dictEnd; /* end of previous segment */
121 size_t expected;
122 ZSTD_frameHeader fParams;
123 U64 decodedSize;
124 blockType_e bType; /* used in ZSTD_decompressContinue(), store blockType between block header decoding and block decompression stages */
125 ZSTD_dStage stage;
126 U32 litEntropy;
127 U32 fseEntropy;
128 XXH64_state_t xxhState;
129 size_t headerSize;
130 U32 dictID;
131 ZSTD_format_e format;
132 const BYTE* litPtr;
133 ZSTD_customMem customMem;
134 size_t litSize;
135 size_t rleSize;
136 size_t staticSize;
137 int bmi2; /* == 1 if the CPU supports BMI2 and 0 otherwise. CPU support is determined dynamically once per context lifetime. */
138
139 /* streaming */
140 ZSTD_DDict* ddictLocal;
141 const ZSTD_DDict* ddict;
142 ZSTD_dStreamStage streamStage;
143 char* inBuff;
144 size_t inBuffSize;
145 size_t inPos;
146 size_t maxWindowSize;
147 char* outBuff;
148 size_t outBuffSize;
149 size_t outStart;
150 size_t outEnd;
151 size_t lhSize;
152 void* legacyContext;
153 U32 previousLegacyVersion;
154 U32 legacyVersion;
155 U32 hostageByte;
156
157 /* workspace */
158 BYTE litBuffer[ZSTD_BLOCKSIZE_MAX + WILDCOPY_OVERLENGTH];
159 BYTE headerBuffer[ZSTD_FRAMEHEADERSIZE_MAX];
160}; /* typedef'd to ZSTD_DCtx within "zstd.h" */
161
162size_t ZSTD_sizeof_DCtx (const ZSTD_DCtx* dctx)
163{
164 if (dctx==NULL) return 0; /* support sizeof NULL */
165 return sizeof(*dctx)
166 + ZSTD_sizeof_DDict(dctx->ddictLocal)
167 + dctx->inBuffSize + dctx->outBuffSize;
168}
169
170size_t ZSTD_estimateDCtxSize(void) { return sizeof(ZSTD_DCtx); }
171
172
173static size_t ZSTD_startingInputLength(ZSTD_format_e format)
174{
175 size_t const startingInputLength = (format==ZSTD_f_zstd1_magicless) ?
176 ZSTD_frameHeaderSize_prefix - ZSTD_frameIdSize :
177 ZSTD_frameHeaderSize_prefix;
178 ZSTD_STATIC_ASSERT(ZSTD_FRAMEHEADERSIZE_PREFIX >= ZSTD_FRAMEIDSIZE);
179 /* only supports formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless */
180 assert( (format == ZSTD_f_zstd1) || (format == ZSTD_f_zstd1_magicless) );
181 return startingInputLength;
182}
183
184static void ZSTD_initDCtx_internal(ZSTD_DCtx* dctx)
185{
186 dctx->format = ZSTD_f_zstd1; /* ZSTD_decompressBegin() invokes ZSTD_startingInputLength() with argument dctx->format */
187 dctx->staticSize = 0;
188 dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
189 dctx->ddict = NULL;
190 dctx->ddictLocal = NULL;
191 dctx->inBuff = NULL;
192 dctx->inBuffSize = 0;
193 dctx->outBuffSize = 0;
194 dctx->streamStage = zdss_init;
195 dctx->bmi2 = ZSTD_cpuid_bmi2(ZSTD_cpuid());
196}
197
198ZSTD_DCtx* ZSTD_initStaticDCtx(void *workspace, size_t workspaceSize)
199{
200 ZSTD_DCtx* const dctx = (ZSTD_DCtx*) workspace;
201
202 if ((size_t)workspace & 7) return NULL; /* 8-aligned */
203 if (workspaceSize < sizeof(ZSTD_DCtx)) return NULL; /* minimum size */
204
205 ZSTD_initDCtx_internal(dctx);
206 dctx->staticSize = workspaceSize;
207 dctx->inBuff = (char*)(dctx+1);
208 return dctx;
209}
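
/* Illustrative usage sketch (not part of the original sources) : a DCtx placed in
 * caller-owned storage. `workspace` is a hypothetical caller buffer; it must be
 * 8-byte aligned, at least ZSTD_estimateDCtxSize() bytes, and must outlive the
 * returned context. A static DCtx is never passed to ZSTD_freeDCtx() ; the caller
 * simply releases the workspace itself (malloc() is used here only as an example
 * source of suitably aligned storage) :
 *
 *     size_t const needed = ZSTD_estimateDCtxSize();
 *     void* const workspace = malloc(needed);
 *     ZSTD_DCtx* const dctx = ZSTD_initStaticDCtx(workspace, needed);
 *     if (dctx == NULL) { ... workspace misaligned or too small ... }
 *     ...
 *     free(workspace);
 */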
210
211ZSTD_DCtx* ZSTD_createDCtx_advanced(ZSTD_customMem customMem)
212{
213 if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
214
215 { ZSTD_DCtx* const dctx = (ZSTD_DCtx*)ZSTD_malloc(sizeof(*dctx), customMem);
216 if (!dctx) return NULL;
217 dctx->customMem = customMem;
218 dctx->legacyContext = NULL;
219 dctx->previousLegacyVersion = 0;
220 ZSTD_initDCtx_internal(dctx);
221 return dctx;
222 }
223}
224
225ZSTD_DCtx* ZSTD_createDCtx(void)
226{
227 DEBUGLOG(3, "ZSTD_createDCtx");
228 return ZSTD_createDCtx_advanced(ZSTD_defaultCMem);
229}
230
231size_t ZSTD_freeDCtx(ZSTD_DCtx* dctx)
232{
233 if (dctx==NULL) return 0; /* support free on NULL */
234 if (dctx->staticSize) return ERROR(memory_allocation); /* not compatible with static DCtx */
235 { ZSTD_customMem const cMem = dctx->customMem;
236 ZSTD_freeDDict(dctx->ddictLocal);
237 dctx->ddictLocal = NULL;
238 ZSTD_free(dctx->inBuff, cMem);
239 dctx->inBuff = NULL;
240#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
241 if (dctx->legacyContext)
242 ZSTD_freeLegacyStreamContext(dctx->legacyContext, dctx->previousLegacyVersion);
243#endif
244 ZSTD_free(dctx, cMem);
245 return 0;
246 }
247}
248
249/* no longer useful */
250void ZSTD_copyDCtx(ZSTD_DCtx* dstDCtx, const ZSTD_DCtx* srcDCtx)
251{
252 size_t const toCopy = (size_t)((char*)(&dstDCtx->inBuff) - (char*)dstDCtx);
253 memcpy(dstDCtx, srcDCtx, toCopy); /* no need to copy workspace */
254}
255
256
257/*-*************************************************************
258 * Frame header decoding
259 ***************************************************************/
260
261/*! ZSTD_isFrame() :
262 * Tells if the content of `buffer` starts with a valid Frame Identifier.
263 * Note : Frame Identifier is 4 bytes. If `size < 4`, @return will always be 0.
264 * Note 2 : Legacy Frame Identifiers are considered valid only if Legacy Support is enabled.
265 * Note 3 : Skippable Frame Identifiers are considered valid. */
266unsigned ZSTD_isFrame(const void* buffer, size_t size)
267{
268 if (size < ZSTD_frameIdSize) return 0;
269 { U32 const magic = MEM_readLE32(buffer);
270 if (magic == ZSTD_MAGICNUMBER) return 1;
271 if ((magic & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) return 1;
272 }
273#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
274 if (ZSTD_isLegacy(buffer, size)) return 1;
275#endif
276 return 0;
277}
278
279/** ZSTD_frameHeaderSize_internal() :
280 * srcSize must be large enough to reach header size fields.
281 * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless.
282 * @return : size of the Frame Header
283 * or an error code, which can be tested with ZSTD_isError() */
284static size_t ZSTD_frameHeaderSize_internal(const void* src, size_t srcSize, ZSTD_format_e format)
285{
286 size_t const minInputSize = ZSTD_startingInputLength(format);
287 if (srcSize < minInputSize) return ERROR(srcSize_wrong);
288
289 { BYTE const fhd = ((const BYTE*)src)[minInputSize-1];
290 U32 const dictID= fhd & 3;
291 U32 const singleSegment = (fhd >> 5) & 1;
292 U32 const fcsId = fhd >> 6;
293 return minInputSize + !singleSegment
294 + ZSTD_did_fieldSize[dictID] + ZSTD_fcs_fieldSize[fcsId]
295 + (singleSegment && !fcsId);
296 }
297}
298
299/** ZSTD_frameHeaderSize() :
300 * srcSize must be >= ZSTD_frameHeaderSize_prefix.
301 * @return : size of the Frame Header */
302size_t ZSTD_frameHeaderSize(const void* src, size_t srcSize)
303{
304 return ZSTD_frameHeaderSize_internal(src, srcSize, ZSTD_f_zstd1);
305}
306
307
/** ZSTD_getFrameHeader_internal() :
 * decode Frame Header, or require larger `srcSize`.
 * note : only works for formats ZSTD_f_zstd1 and ZSTD_f_zstd1_magicless
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, the return value is the wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
314static size_t ZSTD_getFrameHeader_internal(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize, ZSTD_format_e format)
315{
316 const BYTE* ip = (const BYTE*)src;
317 size_t const minInputSize = ZSTD_startingInputLength(format);
318
319 if (srcSize < minInputSize) return minInputSize;
320
321 if ( (format != ZSTD_f_zstd1_magicless)
322 && (MEM_readLE32(src) != ZSTD_MAGICNUMBER) ) {
323 if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
324 /* skippable frame */
325 if (srcSize < ZSTD_skippableHeaderSize)
326 return ZSTD_skippableHeaderSize; /* magic number + frame length */
327 memset(zfhPtr, 0, sizeof(*zfhPtr));
328 zfhPtr->frameContentSize = MEM_readLE32((const char *)src + ZSTD_frameIdSize);
329 zfhPtr->frameType = ZSTD_skippableFrame;
330 return 0;
331 }
332 return ERROR(prefix_unknown);
333 }
334
    /* ensure `srcSize` is large enough to fully read/decode the frame header */
336 { size_t const fhsize = ZSTD_frameHeaderSize_internal(src, srcSize, format);
337 if (srcSize < fhsize) return fhsize;
338 zfhPtr->headerSize = (U32)fhsize;
339 }
340
341 { BYTE const fhdByte = ip[minInputSize-1];
342 size_t pos = minInputSize;
343 U32 const dictIDSizeCode = fhdByte&3;
344 U32 const checksumFlag = (fhdByte>>2)&1;
345 U32 const singleSegment = (fhdByte>>5)&1;
346 U32 const fcsID = fhdByte>>6;
347 U64 windowSize = 0;
348 U32 dictID = 0;
349 U64 frameContentSize = ZSTD_CONTENTSIZE_UNKNOWN;
350 if ((fhdByte & 0x08) != 0)
351 return ERROR(frameParameter_unsupported); /* reserved bits, must be zero */
352
353 if (!singleSegment) {
354 BYTE const wlByte = ip[pos++];
355 U32 const windowLog = (wlByte >> 3) + ZSTD_WINDOWLOG_ABSOLUTEMIN;
356 if (windowLog > ZSTD_WINDOWLOG_MAX)
357 return ERROR(frameParameter_windowTooLarge);
358 windowSize = (1ULL << windowLog);
359 windowSize += (windowSize >> 3) * (wlByte&7);
360 }
361 switch(dictIDSizeCode)
362 {
363 default: assert(0); /* impossible */
364 case 0 : break;
365 case 1 : dictID = ip[pos]; pos++; break;
366 case 2 : dictID = MEM_readLE16(ip+pos); pos+=2; break;
367 case 3 : dictID = MEM_readLE32(ip+pos); pos+=4; break;
368 }
369 switch(fcsID)
370 {
371 default: assert(0); /* impossible */
372 case 0 : if (singleSegment) frameContentSize = ip[pos]; break;
373 case 1 : frameContentSize = MEM_readLE16(ip+pos)+256; break;
374 case 2 : frameContentSize = MEM_readLE32(ip+pos); break;
375 case 3 : frameContentSize = MEM_readLE64(ip+pos); break;
376 }
377 if (singleSegment) windowSize = frameContentSize;
378
379 zfhPtr->frameType = ZSTD_frame;
380 zfhPtr->frameContentSize = frameContentSize;
381 zfhPtr->windowSize = windowSize;
382 zfhPtr->blockSizeMax = (unsigned) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
383 zfhPtr->dictID = dictID;
384 zfhPtr->checksumFlag = checksumFlag;
385 }
386 return 0;
387}
388
/** ZSTD_getFrameHeader() :
 * decode Frame Header, or require larger `srcSize`.
 * note : this function does not consume input, it only reads it.
 * @return : 0, `zfhPtr` is correctly filled,
 *          >0, `srcSize` is too small, the return value is the wanted `srcSize` amount,
 *           or an error code, which can be tested using ZSTD_isError() */
395size_t ZSTD_getFrameHeader(ZSTD_frameHeader* zfhPtr, const void* src, size_t srcSize)
396{
397 return ZSTD_getFrameHeader_internal(zfhPtr, src, srcSize, ZSTD_f_zstd1);
398}
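
/* Illustrative sketch (not part of the original sources) : how a caller is expected
 * to interpret the three outcomes documented above. `buf` and `filled` are
 * hypothetical names for a caller-side input buffer and the number of valid bytes in it :
 *
 *     ZSTD_frameHeader zfh;
 *     size_t const ret = ZSTD_getFrameHeader(&zfh, buf, filled);
 *     if (ZSTD_isError(ret)) { ... not a valid zstd frame ... }
 *     else if (ret > 0)      { ... supply at least ret bytes in total, then retry ... }
 *     else                   { ... zfh.windowSize, zfh.frameContentSize, etc. are now usable ... }
 */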
399
400
/** ZSTD_getFrameContentSize() :
 * compatible with legacy mode
 * @return : decompressed size of the single frame pointed to by `src` if known, otherwise
 *           - ZSTD_CONTENTSIZE_UNKNOWN if the size cannot be determined
 *           - ZSTD_CONTENTSIZE_ERROR if an error occurred (e.g. invalid magic number, srcSize too small) */
406unsigned long long ZSTD_getFrameContentSize(const void *src, size_t srcSize)
407{
408#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
409 if (ZSTD_isLegacy(src, srcSize)) {
410 unsigned long long const ret = ZSTD_getDecompressedSize_legacy(src, srcSize);
411 return ret == 0 ? ZSTD_CONTENTSIZE_UNKNOWN : ret;
412 }
413#endif
414 { ZSTD_frameHeader zfh;
415 if (ZSTD_getFrameHeader(&zfh, src, srcSize) != 0)
416 return ZSTD_CONTENTSIZE_ERROR;
417 if (zfh.frameType == ZSTD_skippableFrame) {
418 return 0;
419 } else {
420 return zfh.frameContentSize;
421 } }
422}
423
424/** ZSTD_findDecompressedSize() :
425 * compatible with legacy mode
426 * `srcSize` must be the exact length of some number of ZSTD compressed and/or
427 * skippable frames
428 * @return : decompressed size of the frames contained */
429unsigned long long ZSTD_findDecompressedSize(const void* src, size_t srcSize)
430{
431 unsigned long long totalDstSize = 0;
432
433 while (srcSize >= ZSTD_frameHeaderSize_prefix) {
434 U32 const magicNumber = MEM_readLE32(src);
435
436 if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
437 size_t skippableSize;
438 if (srcSize < ZSTD_skippableHeaderSize)
439 return ERROR(srcSize_wrong);
440 skippableSize = MEM_readLE32((const BYTE *)src + ZSTD_frameIdSize)
441 + ZSTD_skippableHeaderSize;
442 if (srcSize < skippableSize) {
443 return ZSTD_CONTENTSIZE_ERROR;
444 }
445
446 src = (const BYTE *)src + skippableSize;
447 srcSize -= skippableSize;
448 continue;
449 }
450
451 { unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
452 if (ret >= ZSTD_CONTENTSIZE_ERROR) return ret;
453
454 /* check for overflow */
455 if (totalDstSize + ret < totalDstSize) return ZSTD_CONTENTSIZE_ERROR;
456 totalDstSize += ret;
457 }
458 { size_t const frameSrcSize = ZSTD_findFrameCompressedSize(src, srcSize);
459 if (ZSTD_isError(frameSrcSize)) {
460 return ZSTD_CONTENTSIZE_ERROR;
461 }
462
463 src = (const BYTE *)src + frameSrcSize;
464 srcSize -= frameSrcSize;
465 }
466 } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
467
468 if (srcSize) return ZSTD_CONTENTSIZE_ERROR;
469
470 return totalDstSize;
471}
472
/** ZSTD_getDecompressedSize() :
 *  compatible with legacy mode
 *  @return : decompressed size if known, 0 otherwise
 *            note : 0 can mean any of the following :
 *                 - frame content is empty
 *                 - decompressed size field is not present in frame header
 *                 - frame header unknown / not supported
 *                 - frame header not complete (`srcSize` too small) */
481unsigned long long ZSTD_getDecompressedSize(const void* src, size_t srcSize)
482{
483 unsigned long long const ret = ZSTD_getFrameContentSize(src, srcSize);
484 ZSTD_STATIC_ASSERT(ZSTD_CONTENTSIZE_ERROR < ZSTD_CONTENTSIZE_UNKNOWN);
485 return (ret >= ZSTD_CONTENTSIZE_ERROR) ? 0 : ret;
486}
487
488
489/** ZSTD_decodeFrameHeader() :
490* `headerSize` must be the size provided by ZSTD_frameHeaderSize().
491* @return : 0 if success, or an error code, which can be tested using ZSTD_isError() */
492static size_t ZSTD_decodeFrameHeader(ZSTD_DCtx* dctx, const void* src, size_t headerSize)
493{
494 size_t const result = ZSTD_getFrameHeader_internal(&(dctx->fParams), src, headerSize, dctx->format);
495 if (ZSTD_isError(result)) return result; /* invalid header */
496 if (result>0) return ERROR(srcSize_wrong); /* headerSize too small */
497 if (dctx->fParams.dictID && (dctx->dictID != dctx->fParams.dictID))
498 return ERROR(dictionary_wrong);
499 if (dctx->fParams.checksumFlag) XXH64_reset(&dctx->xxhState, 0);
500 return 0;
501}
502
503
504/*-*************************************************************
505 * Block decoding
506 ***************************************************************/
507
/*! ZSTD_getcBlockSize() :
 *  Provides the size of the compressed block, read from the block header `src` */
510size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
511 blockProperties_t* bpPtr)
512{
513 if (srcSize < ZSTD_blockHeaderSize) return ERROR(srcSize_wrong);
514 { U32 const cBlockHeader = MEM_readLE24(src);
515 U32 const cSize = cBlockHeader >> 3;
516 bpPtr->lastBlock = cBlockHeader & 1;
517 bpPtr->blockType = (blockType_e)((cBlockHeader >> 1) & 3);
518 bpPtr->origSize = cSize; /* only useful for RLE */
519 if (bpPtr->blockType == bt_rle) return 1;
520 if (bpPtr->blockType == bt_reserved) return ERROR(corruption_detected);
521 return cSize;
522 }
523}
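
/* Worked example of the 3-byte little-endian block header read above (illustrative) :
 * cBlockHeader == 0x000029 is 0b101001 : bit 0 == 1 (last block),
 * bits 1-2 == 00 (bt_raw), bits 3-23 == 5, i.e. a final raw block of 5 bytes.
 * For bt_rle the function returns 1 (the single byte to repeat),
 * while bpPtr->origSize keeps the regenerated size. */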
524
525
526static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity,
527 const void* src, size_t srcSize)
528{
529 if (srcSize > dstCapacity) return ERROR(dstSize_tooSmall);
530 memcpy(dst, src, srcSize);
531 return srcSize;
532}
533
534
535static size_t ZSTD_setRleBlock(void* dst, size_t dstCapacity,
536 const void* src, size_t srcSize,
537 size_t regenSize)
538{
539 if (srcSize != 1) return ERROR(srcSize_wrong);
540 if (regenSize > dstCapacity) return ERROR(dstSize_tooSmall);
541 memset(dst, *(const BYTE*)src, regenSize);
542 return regenSize;
543}
544
/*! ZSTD_decodeLiteralsBlock() :
 * @return : nb of bytes read from src (< srcSize)
 * note : symbol is not declared in a public header, but is exposed for fullbench */
548size_t ZSTD_decodeLiteralsBlock(ZSTD_DCtx* dctx,
549 const void* src, size_t srcSize) /* note : srcSize < BLOCKSIZE */
550{
551 if (srcSize < MIN_CBLOCK_SIZE) return ERROR(corruption_detected);
552
553 { const BYTE* const istart = (const BYTE*) src;
554 symbolEncodingType_e const litEncType = (symbolEncodingType_e)(istart[0] & 3);
555
556 switch(litEncType)
557 {
558 case set_repeat:
559 if (dctx->litEntropy==0) return ERROR(dictionary_corrupted);
560 /* fall-through */
561 case set_compressed:
562 if (srcSize < 5) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need up to 5 for case 3 */
563 { size_t lhSize, litSize, litCSize;
564 U32 singleStream=0;
565 U32 const lhlCode = (istart[0] >> 2) & 3;
566 U32 const lhc = MEM_readLE32(istart);
567 switch(lhlCode)
568 {
            case 0: case 1: default:   /* note : default is impossible, since lhlCode is in [0..3] */
570 /* 2 - 2 - 10 - 10 */
571 singleStream = !lhlCode;
572 lhSize = 3;
573 litSize = (lhc >> 4) & 0x3FF;
574 litCSize = (lhc >> 14) & 0x3FF;
575 break;
576 case 2:
577 /* 2 - 2 - 14 - 14 */
578 lhSize = 4;
579 litSize = (lhc >> 4) & 0x3FFF;
580 litCSize = lhc >> 18;
581 break;
582 case 3:
583 /* 2 - 2 - 18 - 18 */
584 lhSize = 5;
585 litSize = (lhc >> 4) & 0x3FFFF;
586 litCSize = (lhc >> 22) + (istart[4] << 10);
587 break;
588 }
589 if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
590 if (litCSize + lhSize > srcSize) return ERROR(corruption_detected);
591
592 if (HUF_isError((litEncType==set_repeat) ?
593 ( singleStream ?
594 HUF_decompress1X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) :
595 HUF_decompress4X_usingDTable_bmi2(dctx->litBuffer, litSize, istart+lhSize, litCSize, dctx->HUFptr, dctx->bmi2) ) :
596 ( singleStream ?
597 HUF_decompress1X2_DCtx_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
598 dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2) :
599 HUF_decompress4X_hufOnly_wksp_bmi2(dctx->entropy.hufTable, dctx->litBuffer, litSize, istart+lhSize, litCSize,
600 dctx->entropy.workspace, sizeof(dctx->entropy.workspace), dctx->bmi2))))
601 return ERROR(corruption_detected);
602
603 dctx->litPtr = dctx->litBuffer;
604 dctx->litSize = litSize;
605 dctx->litEntropy = 1;
606 if (litEncType==set_compressed) dctx->HUFptr = dctx->entropy.hufTable;
607 memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
608 return litCSize + lhSize;
609 }
610
611 case set_basic:
612 { size_t litSize, lhSize;
613 U32 const lhlCode = ((istart[0]) >> 2) & 3;
614 switch(lhlCode)
615 {
            case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
617 lhSize = 1;
618 litSize = istart[0] >> 3;
619 break;
620 case 1:
621 lhSize = 2;
622 litSize = MEM_readLE16(istart) >> 4;
623 break;
624 case 3:
625 lhSize = 3;
626 litSize = MEM_readLE24(istart) >> 4;
627 break;
628 }
629
630 if (lhSize+litSize+WILDCOPY_OVERLENGTH > srcSize) { /* risk reading beyond src buffer with wildcopy */
631 if (litSize+lhSize > srcSize) return ERROR(corruption_detected);
632 memcpy(dctx->litBuffer, istart+lhSize, litSize);
633 dctx->litPtr = dctx->litBuffer;
634 dctx->litSize = litSize;
635 memset(dctx->litBuffer + dctx->litSize, 0, WILDCOPY_OVERLENGTH);
636 return lhSize+litSize;
637 }
638 /* direct reference into compressed stream */
639 dctx->litPtr = istart+lhSize;
640 dctx->litSize = litSize;
641 return lhSize+litSize;
642 }
643
644 case set_rle:
645 { U32 const lhlCode = ((istart[0]) >> 2) & 3;
646 size_t litSize, lhSize;
647 switch(lhlCode)
648 {
            case 0: case 2: default:   /* note : default is impossible, since lhlCode is in [0..3] */
650 lhSize = 1;
651 litSize = istart[0] >> 3;
652 break;
653 case 1:
654 lhSize = 2;
655 litSize = MEM_readLE16(istart) >> 4;
656 break;
657 case 3:
658 lhSize = 3;
659 litSize = MEM_readLE24(istart) >> 4;
660 if (srcSize<4) return ERROR(corruption_detected); /* srcSize >= MIN_CBLOCK_SIZE == 3; here we need lhSize+1 = 4 */
661 break;
662 }
663 if (litSize > ZSTD_BLOCKSIZE_MAX) return ERROR(corruption_detected);
664 memset(dctx->litBuffer, istart[lhSize], litSize + WILDCOPY_OVERLENGTH);
665 dctx->litPtr = dctx->litBuffer;
666 dctx->litSize = litSize;
667 return lhSize+1;
668 }
669 default:
670 return ERROR(corruption_detected); /* impossible */
671 }
672 }
673}
674
/* Default FSE distribution tables.
 * These are pre-calculated FSE decoding tables, using the default distributions defined in the specification :
 * https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#default-distributions
 * They were generated programmatically with the following method :
 * - start from the default distributions, present in /lib/common/zstd_internal.h
 * - generate the tables normally, using ZSTD_buildFSETable()
 * - print out the content of the tables
 * - prettify the output, reproduce it below, and test with the fuzzer to ensure it's correct */
683
684/* Default FSE distribution table for Literal Lengths */
685static const ZSTD_seqSymbol LL_defaultDTable[(1<<LL_DEFAULTNORMLOG)+1] = {
686 { 1, 1, 1, LL_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
687 /* nextState, nbAddBits, nbBits, baseVal */
688 { 0, 0, 4, 0}, { 16, 0, 4, 0},
689 { 32, 0, 5, 1}, { 0, 0, 5, 3},
690 { 0, 0, 5, 4}, { 0, 0, 5, 6},
691 { 0, 0, 5, 7}, { 0, 0, 5, 9},
692 { 0, 0, 5, 10}, { 0, 0, 5, 12},
693 { 0, 0, 6, 14}, { 0, 1, 5, 16},
694 { 0, 1, 5, 20}, { 0, 1, 5, 22},
695 { 0, 2, 5, 28}, { 0, 3, 5, 32},
696 { 0, 4, 5, 48}, { 32, 6, 5, 64},
697 { 0, 7, 5, 128}, { 0, 8, 6, 256},
698 { 0, 10, 6, 1024}, { 0, 12, 6, 4096},
699 { 32, 0, 4, 0}, { 0, 0, 4, 1},
700 { 0, 0, 5, 2}, { 32, 0, 5, 4},
701 { 0, 0, 5, 5}, { 32, 0, 5, 7},
702 { 0, 0, 5, 8}, { 32, 0, 5, 10},
703 { 0, 0, 5, 11}, { 0, 0, 6, 13},
704 { 32, 1, 5, 16}, { 0, 1, 5, 18},
705 { 32, 1, 5, 22}, { 0, 2, 5, 24},
706 { 32, 3, 5, 32}, { 0, 3, 5, 40},
707 { 0, 6, 4, 64}, { 16, 6, 4, 64},
708 { 32, 7, 5, 128}, { 0, 9, 6, 512},
709 { 0, 11, 6, 2048}, { 48, 0, 4, 0},
710 { 16, 0, 4, 1}, { 32, 0, 5, 2},
711 { 32, 0, 5, 3}, { 32, 0, 5, 5},
712 { 32, 0, 5, 6}, { 32, 0, 5, 8},
713 { 32, 0, 5, 9}, { 32, 0, 5, 11},
714 { 32, 0, 5, 12}, { 0, 0, 6, 15},
715 { 32, 1, 5, 18}, { 32, 1, 5, 20},
716 { 32, 2, 5, 24}, { 32, 2, 5, 28},
717 { 32, 3, 5, 40}, { 32, 4, 5, 48},
718 { 0, 16, 6,65536}, { 0, 15, 6,32768},
719 { 0, 14, 6,16384}, { 0, 13, 6, 8192},
720}; /* LL_defaultDTable */
721
722/* Default FSE distribution table for Offset Codes */
723static const ZSTD_seqSymbol OF_defaultDTable[(1<<OF_DEFAULTNORMLOG)+1] = {
724 { 1, 1, 1, OF_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
725 /* nextState, nbAddBits, nbBits, baseVal */
726 { 0, 0, 5, 0}, { 0, 6, 4, 61},
727 { 0, 9, 5, 509}, { 0, 15, 5,32765},
728 { 0, 21, 5,2097149}, { 0, 3, 5, 5},
729 { 0, 7, 4, 125}, { 0, 12, 5, 4093},
730 { 0, 18, 5,262141}, { 0, 23, 5,8388605},
731 { 0, 5, 5, 29}, { 0, 8, 4, 253},
732 { 0, 14, 5,16381}, { 0, 20, 5,1048573},
733 { 0, 2, 5, 1}, { 16, 7, 4, 125},
734 { 0, 11, 5, 2045}, { 0, 17, 5,131069},
735 { 0, 22, 5,4194301}, { 0, 4, 5, 13},
736 { 16, 8, 4, 253}, { 0, 13, 5, 8189},
737 { 0, 19, 5,524285}, { 0, 1, 5, 1},
738 { 16, 6, 4, 61}, { 0, 10, 5, 1021},
739 { 0, 16, 5,65533}, { 0, 28, 5,268435453},
740 { 0, 27, 5,134217725}, { 0, 26, 5,67108861},
741 { 0, 25, 5,33554429}, { 0, 24, 5,16777213},
742}; /* OF_defaultDTable */
743
744
745/* Default FSE distribution table for Match Lengths */
746static const ZSTD_seqSymbol ML_defaultDTable[(1<<ML_DEFAULTNORMLOG)+1] = {
747 { 1, 1, 1, ML_DEFAULTNORMLOG}, /* header : fastMode, tableLog */
748 /* nextState, nbAddBits, nbBits, baseVal */
749 { 0, 0, 6, 3}, { 0, 0, 4, 4},
750 { 32, 0, 5, 5}, { 0, 0, 5, 6},
751 { 0, 0, 5, 8}, { 0, 0, 5, 9},
752 { 0, 0, 5, 11}, { 0, 0, 6, 13},
753 { 0, 0, 6, 16}, { 0, 0, 6, 19},
754 { 0, 0, 6, 22}, { 0, 0, 6, 25},
755 { 0, 0, 6, 28}, { 0, 0, 6, 31},
756 { 0, 0, 6, 34}, { 0, 1, 6, 37},
757 { 0, 1, 6, 41}, { 0, 2, 6, 47},
758 { 0, 3, 6, 59}, { 0, 4, 6, 83},
759 { 0, 7, 6, 131}, { 0, 9, 6, 515},
760 { 16, 0, 4, 4}, { 0, 0, 4, 5},
761 { 32, 0, 5, 6}, { 0, 0, 5, 7},
762 { 32, 0, 5, 9}, { 0, 0, 5, 10},
763 { 0, 0, 6, 12}, { 0, 0, 6, 15},
764 { 0, 0, 6, 18}, { 0, 0, 6, 21},
765 { 0, 0, 6, 24}, { 0, 0, 6, 27},
766 { 0, 0, 6, 30}, { 0, 0, 6, 33},
767 { 0, 1, 6, 35}, { 0, 1, 6, 39},
768 { 0, 2, 6, 43}, { 0, 3, 6, 51},
769 { 0, 4, 6, 67}, { 0, 5, 6, 99},
770 { 0, 8, 6, 259}, { 32, 0, 4, 4},
771 { 48, 0, 4, 4}, { 16, 0, 4, 5},
772 { 32, 0, 5, 7}, { 32, 0, 5, 8},
773 { 32, 0, 5, 10}, { 32, 0, 5, 11},
774 { 0, 0, 6, 14}, { 0, 0, 6, 17},
775 { 0, 0, 6, 20}, { 0, 0, 6, 23},
776 { 0, 0, 6, 26}, { 0, 0, 6, 29},
777 { 0, 0, 6, 32}, { 0, 16, 6,65539},
778 { 0, 15, 6,32771}, { 0, 14, 6,16387},
779 { 0, 13, 6, 8195}, { 0, 12, 6, 4099},
780 { 0, 11, 6, 2051}, { 0, 10, 6, 1027},
781}; /* ML_defaultDTable */
782
783
784static void ZSTD_buildSeqTable_rle(ZSTD_seqSymbol* dt, U32 baseValue, U32 nbAddBits)
785{
786 void* ptr = dt;
787 ZSTD_seqSymbol_header* const DTableH = (ZSTD_seqSymbol_header*)ptr;
788 ZSTD_seqSymbol* const cell = dt + 1;
789
790 DTableH->tableLog = 0;
791 DTableH->fastMode = 0;
792
793 cell->nbBits = 0;
794 cell->nextState = 0;
795 assert(nbAddBits < 255);
796 cell->nbAdditionalBits = (BYTE)nbAddBits;
797 cell->baseValue = baseValue;
798}
799
800
801/* ZSTD_buildFSETable() :
802 * generate FSE decoding table for one symbol (ll, ml or off) */
803static void
804ZSTD_buildFSETable(ZSTD_seqSymbol* dt,
805 const short* normalizedCounter, unsigned maxSymbolValue,
806 const U32* baseValue, const U32* nbAdditionalBits,
807 unsigned tableLog)
808{
809 ZSTD_seqSymbol* const tableDecode = dt+1;
810 U16 symbolNext[MaxSeq+1];
811
812 U32 const maxSV1 = maxSymbolValue + 1;
813 U32 const tableSize = 1 << tableLog;
814 U32 highThreshold = tableSize-1;
815
816 /* Sanity Checks */
817 assert(maxSymbolValue <= MaxSeq);
818 assert(tableLog <= MaxFSELog);
819
820 /* Init, lay down lowprob symbols */
821 { ZSTD_seqSymbol_header DTableH;
822 DTableH.tableLog = tableLog;
823 DTableH.fastMode = 1;
824 { S16 const largeLimit= (S16)(1 << (tableLog-1));
825 U32 s;
826 for (s=0; s<maxSV1; s++) {
827 if (normalizedCounter[s]==-1) {
828 tableDecode[highThreshold--].baseValue = s;
829 symbolNext[s] = 1;
830 } else {
831 if (normalizedCounter[s] >= largeLimit) DTableH.fastMode=0;
832 symbolNext[s] = normalizedCounter[s];
833 } } }
834 memcpy(dt, &DTableH, sizeof(DTableH));
835 }
836
837 /* Spread symbols */
838 { U32 const tableMask = tableSize-1;
839 U32 const step = FSE_TABLESTEP(tableSize);
840 U32 s, position = 0;
841 for (s=0; s<maxSV1; s++) {
842 int i;
843 for (i=0; i<normalizedCounter[s]; i++) {
844 tableDecode[position].baseValue = s;
845 position = (position + step) & tableMask;
846 while (position > highThreshold) position = (position + step) & tableMask; /* lowprob area */
847 } }
848 assert(position == 0); /* position must reach all cells once, otherwise normalizedCounter is incorrect */
849 }
850
851 /* Build Decoding table */
852 { U32 u;
853 for (u=0; u<tableSize; u++) {
854 U32 const symbol = tableDecode[u].baseValue;
855 U32 const nextState = symbolNext[symbol]++;
856 tableDecode[u].nbBits = (BYTE) (tableLog - BIT_highbit32(nextState) );
857 tableDecode[u].nextState = (U16) ( (nextState << tableDecode[u].nbBits) - tableSize);
858 assert(nbAdditionalBits[symbol] < 255);
859 tableDecode[u].nbAdditionalBits = (BYTE)nbAdditionalBits[symbol];
860 tableDecode[u].baseValue = baseValue[symbol];
861 } }
862}
863
864
865/*! ZSTD_buildSeqTable() :
866 * @return : nb bytes read from src,
867 * or an error code if it fails */
868static size_t ZSTD_buildSeqTable(ZSTD_seqSymbol* DTableSpace, const ZSTD_seqSymbol** DTablePtr,
869 symbolEncodingType_e type, U32 max, U32 maxLog,
870 const void* src, size_t srcSize,
871 const U32* baseValue, const U32* nbAdditionalBits,
872 const ZSTD_seqSymbol* defaultTable, U32 flagRepeatTable)
873{
874 switch(type)
875 {
876 case set_rle :
877 if (!srcSize) return ERROR(srcSize_wrong);
878 if ( (*(const BYTE*)src) > max) return ERROR(corruption_detected);
879 { U32 const symbol = *(const BYTE*)src;
880 U32 const baseline = baseValue[symbol];
881 U32 const nbBits = nbAdditionalBits[symbol];
882 ZSTD_buildSeqTable_rle(DTableSpace, baseline, nbBits);
883 }
884 *DTablePtr = DTableSpace;
885 return 1;
886 case set_basic :
887 *DTablePtr = defaultTable;
888 return 0;
889 case set_repeat:
890 if (!flagRepeatTable) return ERROR(corruption_detected);
891 return 0;
892 case set_compressed :
893 { U32 tableLog;
894 S16 norm[MaxSeq+1];
895 size_t const headerSize = FSE_readNCount(norm, &max, &tableLog, src, srcSize);
896 if (FSE_isError(headerSize)) return ERROR(corruption_detected);
897 if (tableLog > maxLog) return ERROR(corruption_detected);
898 ZSTD_buildFSETable(DTableSpace, norm, max, baseValue, nbAdditionalBits, tableLog);
899 *DTablePtr = DTableSpace;
900 return headerSize;
901 }
902 default : /* impossible */
903 assert(0);
904 return ERROR(GENERIC);
905 }
906}
907
908static const U32 LL_base[MaxLL+1] = {
909 0, 1, 2, 3, 4, 5, 6, 7,
910 8, 9, 10, 11, 12, 13, 14, 15,
911 16, 18, 20, 22, 24, 28, 32, 40,
912 48, 64, 0x80, 0x100, 0x200, 0x400, 0x800, 0x1000,
913 0x2000, 0x4000, 0x8000, 0x10000 };
914
915static const U32 OF_base[MaxOff+1] = {
916 0, 1, 1, 5, 0xD, 0x1D, 0x3D, 0x7D,
917 0xFD, 0x1FD, 0x3FD, 0x7FD, 0xFFD, 0x1FFD, 0x3FFD, 0x7FFD,
918 0xFFFD, 0x1FFFD, 0x3FFFD, 0x7FFFD, 0xFFFFD, 0x1FFFFD, 0x3FFFFD, 0x7FFFFD,
919 0xFFFFFD, 0x1FFFFFD, 0x3FFFFFD, 0x7FFFFFD, 0xFFFFFFD, 0x1FFFFFFD, 0x3FFFFFFD, 0x7FFFFFFD };
920
921static const U32 OF_bits[MaxOff+1] = {
922 0, 1, 2, 3, 4, 5, 6, 7,
923 8, 9, 10, 11, 12, 13, 14, 15,
924 16, 17, 18, 19, 20, 21, 22, 23,
925 24, 25, 26, 27, 28, 29, 30, 31 };
926
927static const U32 ML_base[MaxML+1] = {
928 3, 4, 5, 6, 7, 8, 9, 10,
929 11, 12, 13, 14, 15, 16, 17, 18,
930 19, 20, 21, 22, 23, 24, 25, 26,
931 27, 28, 29, 30, 31, 32, 33, 34,
932 35, 37, 39, 41, 43, 47, 51, 59,
933 67, 83, 99, 0x83, 0x103, 0x203, 0x403, 0x803,
934 0x1003, 0x2003, 0x4003, 0x8003, 0x10003 };
935
936
937size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
938 const void* src, size_t srcSize)
939{
940 const BYTE* const istart = (const BYTE* const)src;
941 const BYTE* const iend = istart + srcSize;
942 const BYTE* ip = istart;
943 DEBUGLOG(5, "ZSTD_decodeSeqHeaders");
944
945 /* check */
946 if (srcSize < MIN_SEQUENCES_SIZE) return ERROR(srcSize_wrong);
947
948 /* SeqHead */
949 { int nbSeq = *ip++;
950 if (!nbSeq) { *nbSeqPtr=0; return 1; }
951 if (nbSeq > 0x7F) {
952 if (nbSeq == 0xFF) {
953 if (ip+2 > iend) return ERROR(srcSize_wrong);
954 nbSeq = MEM_readLE16(ip) + LONGNBSEQ, ip+=2;
955 } else {
956 if (ip >= iend) return ERROR(srcSize_wrong);
957 nbSeq = ((nbSeq-0x80)<<8) + *ip++;
958 }
959 }
960 *nbSeqPtr = nbSeq;
961 }
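    /* Worked example of the nbSeq encoding above (illustrative, not from the sources) :
     * a first byte of 0x85 followed by 0x20 yields nbSeq = ((0x85-0x80)<<8) + 0x20 = 1312 ;
     * a first byte of 0xFF followed by the LE16 value 0x0010 yields nbSeq = 0x0010 + LONGNBSEQ. */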
962
963 /* FSE table descriptors */
964 if (ip+4 > iend) return ERROR(srcSize_wrong); /* minimum possible size */
965 { symbolEncodingType_e const LLtype = (symbolEncodingType_e)(*ip >> 6);
966 symbolEncodingType_e const OFtype = (symbolEncodingType_e)((*ip >> 4) & 3);
967 symbolEncodingType_e const MLtype = (symbolEncodingType_e)((*ip >> 2) & 3);
968 ip++;
969
970 /* Build DTables */
971 { size_t const llhSize = ZSTD_buildSeqTable(dctx->entropy.LLTable, &dctx->LLTptr,
972 LLtype, MaxLL, LLFSELog,
973 ip, iend-ip,
974 LL_base, LL_bits,
975 LL_defaultDTable, dctx->fseEntropy);
976 if (ZSTD_isError(llhSize)) return ERROR(corruption_detected);
977 ip += llhSize;
978 }
979
980 { size_t const ofhSize = ZSTD_buildSeqTable(dctx->entropy.OFTable, &dctx->OFTptr,
981 OFtype, MaxOff, OffFSELog,
982 ip, iend-ip,
983 OF_base, OF_bits,
984 OF_defaultDTable, dctx->fseEntropy);
985 if (ZSTD_isError(ofhSize)) return ERROR(corruption_detected);
986 ip += ofhSize;
987 }
988
989 { size_t const mlhSize = ZSTD_buildSeqTable(dctx->entropy.MLTable, &dctx->MLTptr,
990 MLtype, MaxML, MLFSELog,
991 ip, iend-ip,
992 ML_base, ML_bits,
993 ML_defaultDTable, dctx->fseEntropy);
994 if (ZSTD_isError(mlhSize)) return ERROR(corruption_detected);
995 ip += mlhSize;
996 }
997 }
998
999 return ip-istart;
1000}
1001
1002
1003typedef struct {
1004 size_t litLength;
1005 size_t matchLength;
1006 size_t offset;
1007 const BYTE* match;
1008} seq_t;
1009
1010typedef struct {
1011 size_t state;
1012 const ZSTD_seqSymbol* table;
1013} ZSTD_fseState;
1014
1015typedef struct {
1016 BIT_DStream_t DStream;
1017 ZSTD_fseState stateLL;
1018 ZSTD_fseState stateOffb;
1019 ZSTD_fseState stateML;
1020 size_t prevOffset[ZSTD_REP_NUM];
1021 const BYTE* prefixStart;
1022 const BYTE* dictEnd;
1023 size_t pos;
1024} seqState_t;
1025
1026
1027FORCE_NOINLINE
1028size_t ZSTD_execSequenceLast7(BYTE* op,
1029 BYTE* const oend, seq_t sequence,
1030 const BYTE** litPtr, const BYTE* const litLimit,
1031 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
1032{
1033 BYTE* const oLitEnd = op + sequence.litLength;
1034 size_t const sequenceLength = sequence.litLength + sequence.matchLength;
1035 BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
1036 BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
1037 const BYTE* const iLitEnd = *litPtr + sequence.litLength;
1038 const BYTE* match = oLitEnd - sequence.offset;
1039
1040 /* check */
1041 if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
1042 if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
    if (oLitEnd <= oend_w) return ERROR(GENERIC);   /* Precondition : this helper is only called when oLitEnd > oend_w */
1044
1045 /* copy literals */
1046 if (op < oend_w) {
1047 ZSTD_wildcopy(op, *litPtr, oend_w - op);
1048 *litPtr += oend_w - op;
1049 op = oend_w;
1050 }
1051 while (op < oLitEnd) *op++ = *(*litPtr)++;
1052
1053 /* copy Match */
1054 if (sequence.offset > (size_t)(oLitEnd - base)) {
1055 /* offset beyond prefix */
1056 if (sequence.offset > (size_t)(oLitEnd - vBase)) return ERROR(corruption_detected);
1057 match = dictEnd - (base-match);
1058 if (match + sequence.matchLength <= dictEnd) {
1059 memmove(oLitEnd, match, sequence.matchLength);
1060 return sequenceLength;
1061 }
1062 /* span extDict & currentPrefixSegment */
1063 { size_t const length1 = dictEnd - match;
1064 memmove(oLitEnd, match, length1);
1065 op = oLitEnd + length1;
1066 sequence.matchLength -= length1;
1067 match = base;
1068 } }
1069 while (op < oMatchEnd) *op++ = *match++;
1070 return sequenceLength;
1071}
1072
1073
1074HINT_INLINE
1075size_t ZSTD_execSequence(BYTE* op,
1076 BYTE* const oend, seq_t sequence,
1077 const BYTE** litPtr, const BYTE* const litLimit,
1078 const BYTE* const base, const BYTE* const vBase, const BYTE* const dictEnd)
1079{
1080 BYTE* const oLitEnd = op + sequence.litLength;
1081 size_t const sequenceLength = sequence.litLength + sequence.matchLength;
1082 BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
1083 BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
1084 const BYTE* const iLitEnd = *litPtr + sequence.litLength;
1085 const BYTE* match = oLitEnd - sequence.offset;
1086
1087 /* check */
1088 if (oMatchEnd>oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
1089 if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
1090 if (oLitEnd>oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, base, vBase, dictEnd);
1091
1092 /* copy Literals */
1093 ZSTD_copy8(op, *litPtr);
1094 if (sequence.litLength > 8)
1095 ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
1096 op = oLitEnd;
1097 *litPtr = iLitEnd; /* update for next sequence */
1098
1099 /* copy Match */
1100 if (sequence.offset > (size_t)(oLitEnd - base)) {
1101 /* offset beyond prefix -> go into extDict */
1102 if (sequence.offset > (size_t)(oLitEnd - vBase))
1103 return ERROR(corruption_detected);
1104 match = dictEnd + (match - base);
1105 if (match + sequence.matchLength <= dictEnd) {
1106 memmove(oLitEnd, match, sequence.matchLength);
1107 return sequenceLength;
1108 }
1109 /* span extDict & currentPrefixSegment */
1110 { size_t const length1 = dictEnd - match;
1111 memmove(oLitEnd, match, length1);
1112 op = oLitEnd + length1;
1113 sequence.matchLength -= length1;
1114 match = base;
1115 if (op > oend_w || sequence.matchLength < MINMATCH) {
1116 U32 i;
1117 for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
1118 return sequenceLength;
1119 }
1120 } }
1121 /* Requirement: op <= oend_w && sequence.matchLength >= MINMATCH */
1122
1123 /* match within prefix */
1124 if (sequence.offset < 8) {
1125 /* close range match, overlap */
1126 static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
1127 static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
1128 int const sub2 = dec64table[sequence.offset];
1129 op[0] = match[0];
1130 op[1] = match[1];
1131 op[2] = match[2];
1132 op[3] = match[3];
1133 match += dec32table[sequence.offset];
1134 ZSTD_copy4(op+4, match);
1135 match -= sub2;
1136 } else {
1137 ZSTD_copy8(op, match);
1138 }
1139 op += 8; match += 8;
1140
1141 if (oMatchEnd > oend-(16-MINMATCH)) {
1142 if (op < oend_w) {
1143 ZSTD_wildcopy(op, match, oend_w - op);
1144 match += oend_w - op;
1145 op = oend_w;
1146 }
1147 while (op < oMatchEnd) *op++ = *match++;
1148 } else {
1149 ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
1150 }
1151 return sequenceLength;
1152}
1153
1154
1155HINT_INLINE
1156size_t ZSTD_execSequenceLong(BYTE* op,
1157 BYTE* const oend, seq_t sequence,
1158 const BYTE** litPtr, const BYTE* const litLimit,
1159 const BYTE* const prefixStart, const BYTE* const dictStart, const BYTE* const dictEnd)
1160{
1161 BYTE* const oLitEnd = op + sequence.litLength;
1162 size_t const sequenceLength = sequence.litLength + sequence.matchLength;
1163 BYTE* const oMatchEnd = op + sequenceLength; /* risk : address space overflow (32-bits) */
1164 BYTE* const oend_w = oend - WILDCOPY_OVERLENGTH;
1165 const BYTE* const iLitEnd = *litPtr + sequence.litLength;
1166 const BYTE* match = sequence.match;
1167
1168 /* check */
1169 if (oMatchEnd > oend) return ERROR(dstSize_tooSmall); /* last match must start at a minimum distance of WILDCOPY_OVERLENGTH from oend */
1170 if (iLitEnd > litLimit) return ERROR(corruption_detected); /* over-read beyond lit buffer */
1171 if (oLitEnd > oend_w) return ZSTD_execSequenceLast7(op, oend, sequence, litPtr, litLimit, prefixStart, dictStart, dictEnd);
1172
1173 /* copy Literals */
1174 ZSTD_copy8(op, *litPtr); /* note : op <= oLitEnd <= oend_w == oend - 8 */
1175 if (sequence.litLength > 8)
1176 ZSTD_wildcopy(op+8, (*litPtr)+8, sequence.litLength - 8); /* note : since oLitEnd <= oend-WILDCOPY_OVERLENGTH, no risk of overwrite beyond oend */
1177 op = oLitEnd;
1178 *litPtr = iLitEnd; /* update for next sequence */
1179
1180 /* copy Match */
1181 if (sequence.offset > (size_t)(oLitEnd - prefixStart)) {
1182 /* offset beyond prefix */
1183 if (sequence.offset > (size_t)(oLitEnd - dictStart)) return ERROR(corruption_detected);
1184 if (match + sequence.matchLength <= dictEnd) {
1185 memmove(oLitEnd, match, sequence.matchLength);
1186 return sequenceLength;
1187 }
1188 /* span extDict & currentPrefixSegment */
1189 { size_t const length1 = dictEnd - match;
1190 memmove(oLitEnd, match, length1);
1191 op = oLitEnd + length1;
1192 sequence.matchLength -= length1;
1193 match = prefixStart;
1194 if (op > oend_w || sequence.matchLength < MINMATCH) {
1195 U32 i;
1196 for (i = 0; i < sequence.matchLength; ++i) op[i] = match[i];
1197 return sequenceLength;
1198 }
1199 } }
1200 assert(op <= oend_w);
1201 assert(sequence.matchLength >= MINMATCH);
1202
1203 /* match within prefix */
1204 if (sequence.offset < 8) {
1205 /* close range match, overlap */
1206 static const U32 dec32table[] = { 0, 1, 2, 1, 4, 4, 4, 4 }; /* added */
1207 static const int dec64table[] = { 8, 8, 8, 7, 8, 9,10,11 }; /* subtracted */
1208 int const sub2 = dec64table[sequence.offset];
1209 op[0] = match[0];
1210 op[1] = match[1];
1211 op[2] = match[2];
1212 op[3] = match[3];
1213 match += dec32table[sequence.offset];
1214 ZSTD_copy4(op+4, match);
1215 match -= sub2;
1216 } else {
1217 ZSTD_copy8(op, match);
1218 }
1219 op += 8; match += 8;
1220
1221 if (oMatchEnd > oend-(16-MINMATCH)) {
1222 if (op < oend_w) {
1223 ZSTD_wildcopy(op, match, oend_w - op);
1224 match += oend_w - op;
1225 op = oend_w;
1226 }
1227 while (op < oMatchEnd) *op++ = *match++;
1228 } else {
1229 ZSTD_wildcopy(op, match, (ptrdiff_t)sequence.matchLength-8); /* works even if matchLength < 8 */
1230 }
1231 return sequenceLength;
1232}
1233
1234static void
1235ZSTD_initFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD, const ZSTD_seqSymbol* dt)
1236{
1237 const void* ptr = dt;
1238 const ZSTD_seqSymbol_header* const DTableH = (const ZSTD_seqSymbol_header*)ptr;
1239 DStatePtr->state = BIT_readBits(bitD, DTableH->tableLog);
1240 DEBUGLOG(6, "ZSTD_initFseState : val=%u using %u bits",
1241 (U32)DStatePtr->state, DTableH->tableLog);
1242 BIT_reloadDStream(bitD);
1243 DStatePtr->table = dt + 1;
1244}
1245
1246FORCE_INLINE_TEMPLATE void
1247ZSTD_updateFseState(ZSTD_fseState* DStatePtr, BIT_DStream_t* bitD)
1248{
1249 ZSTD_seqSymbol const DInfo = DStatePtr->table[DStatePtr->state];
1250 U32 const nbBits = DInfo.nbBits;
1251 size_t const lowBits = BIT_readBits(bitD, nbBits);
1252 DStatePtr->state = DInfo.nextState + lowBits;
1253}
1254
/* We need to add at most (ZSTD_WINDOWLOG_MAX_32 - 1) bits to read the maximum
 * offset bits. But we can only read at most (STREAM_ACCUMULATOR_MIN_32 - 1)
 * bits before reloading. This value is the maximum number of extra bits we read
 * after reloading when we are decoding long offsets.
 */
1260#define LONG_OFFSETS_MAX_EXTRA_BITS_32 \
1261 (ZSTD_WINDOWLOG_MAX_32 > STREAM_ACCUMULATOR_MIN_32 \
1262 ? ZSTD_WINDOWLOG_MAX_32 - STREAM_ACCUMULATOR_MIN_32 \
1263 : 0)
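
/* Worked example (constants assumed from zstd.h and bitstream.h of this version :
 * ZSTD_WINDOWLOG_MAX_32 == 30, STREAM_ACCUMULATOR_MIN_32 == 25) :
 * LONG_OFFSETS_MAX_EXTRA_BITS_32 then evaluates to 30 - 25 == 5, which is what the
 * ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5) inside ZSTD_decodeSequence()
 * checks. */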
1264
1265typedef enum { ZSTD_lo_isRegularOffset, ZSTD_lo_isLongOffset=1 } ZSTD_longOffset_e;
1266
1267FORCE_INLINE_TEMPLATE seq_t
1268ZSTD_decodeSequence(seqState_t* seqState, const ZSTD_longOffset_e longOffsets)
1269{
1270 seq_t seq;
1271 U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
1272 U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
1273 U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
1274 U32 const totalBits = llBits+mlBits+ofBits;
1275 U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
1276 U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
1277 U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
1278
1279 /* sequence */
1280 { size_t offset;
1281 if (!ofBits)
1282 offset = 0;
1283 else {
1284 ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
1285 ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
1286 assert(ofBits <= MaxOff);
1287 if (MEM_32bits() && longOffsets && (ofBits >= STREAM_ACCUMULATOR_MIN_32)) {
1288 U32 const extraBits = ofBits - MIN(ofBits, 32 - seqState->DStream.bitsConsumed);
1289 offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
1290 BIT_reloadDStream(&seqState->DStream);
1291 if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
1292 assert(extraBits <= LONG_OFFSETS_MAX_EXTRA_BITS_32); /* to avoid another reload */
1293 } else {
1294 offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits/*>0*/); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
1295 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
1296 }
1297 }
1298
1299 if (ofBits <= 1) {
1300 offset += (llBase==0);
1301 if (offset) {
1302 size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
1303 temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
1304 if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
1305 seqState->prevOffset[1] = seqState->prevOffset[0];
1306 seqState->prevOffset[0] = offset = temp;
1307 } else { /* offset == 0 */
1308 offset = seqState->prevOffset[0];
1309 }
1310 } else {
1311 seqState->prevOffset[2] = seqState->prevOffset[1];
1312 seqState->prevOffset[1] = seqState->prevOffset[0];
1313 seqState->prevOffset[0] = offset;
1314 }
1315 seq.offset = offset;
1316 }
1317
1318 seq.matchLength = mlBase
1319 + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits/*>0*/) : 0); /* <= 16 bits */
1320 if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
1321 BIT_reloadDStream(&seqState->DStream);
1322 if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
1323 BIT_reloadDStream(&seqState->DStream);
1324 /* Ensure there are enough bits to read the rest of data in 64-bit mode. */
1325 ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
1326
1327 seq.litLength = llBase
1328 + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits/*>0*/) : 0); /* <= 16 bits */
1329 if (MEM_32bits())
1330 BIT_reloadDStream(&seqState->DStream);
1331
1332 DEBUGLOG(6, "seq: litL=%u, matchL=%u, offset=%u",
1333 (U32)seq.litLength, (U32)seq.matchLength, (U32)seq.offset);
1334
1335 /* ANS state update */
1336 ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
1337 ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
1338 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
1339 ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
1340
1341 return seq;
1342}
1343
1344FORCE_INLINE_TEMPLATE size_t
1345ZSTD_decompressSequences_body( ZSTD_DCtx* dctx,
1346 void* dst, size_t maxDstSize,
1347 const void* seqStart, size_t seqSize, int nbSeq,
1348 const ZSTD_longOffset_e isLongOffset)
1349{
1350 const BYTE* ip = (const BYTE*)seqStart;
1351 const BYTE* const iend = ip + seqSize;
1352 BYTE* const ostart = (BYTE* const)dst;
1353 BYTE* const oend = ostart + maxDstSize;
1354 BYTE* op = ostart;
1355 const BYTE* litPtr = dctx->litPtr;
1356 const BYTE* const litEnd = litPtr + dctx->litSize;
1357 const BYTE* const base = (const BYTE*) (dctx->base);
1358 const BYTE* const vBase = (const BYTE*) (dctx->vBase);
1359 const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
1360 DEBUGLOG(5, "ZSTD_decompressSequences");
1361
1362 /* Regen sequences */
1363 if (nbSeq) {
1364 seqState_t seqState;
1365 dctx->fseEntropy = 1;
1366 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
1367 CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
1368 ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1369 ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1370 ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
1371
1372 for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && nbSeq ; ) {
1373 nbSeq--;
1374 { seq_t const sequence = ZSTD_decodeSequence(&seqState, isLongOffset);
1375 size_t const oneSeqSize = ZSTD_execSequence(op, oend, sequence, &litPtr, litEnd, base, vBase, dictEnd);
1376 DEBUGLOG(6, "regenerated sequence size : %u", (U32)oneSeqSize);
1377 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1378 op += oneSeqSize;
1379 } }
1380
1381 /* check if reached exact end */
1382 DEBUGLOG(5, "ZSTD_decompressSequences: after decode loop, remaining nbSeq : %i", nbSeq);
1383 if (nbSeq) return ERROR(corruption_detected);
1384 /* save reps for next block */
1385 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
1386 }
1387
1388 /* last literal segment */
1389 { size_t const lastLLSize = litEnd - litPtr;
1390 if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
1391 memcpy(op, litPtr, lastLLSize);
1392 op += lastLLSize;
1393 }
1394
1395 return op-ostart;
1396}
1397
1398static size_t
1399ZSTD_decompressSequences_default(ZSTD_DCtx* dctx,
1400 void* dst, size_t maxDstSize,
1401 const void* seqStart, size_t seqSize, int nbSeq,
1402 const ZSTD_longOffset_e isLongOffset)
1403{
1404 return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1405}
1406
1407
1408
1409FORCE_INLINE_TEMPLATE seq_t
1410ZSTD_decodeSequenceLong(seqState_t* seqState, ZSTD_longOffset_e const longOffsets)
1411{
1412 seq_t seq;
1413 U32 const llBits = seqState->stateLL.table[seqState->stateLL.state].nbAdditionalBits;
1414 U32 const mlBits = seqState->stateML.table[seqState->stateML.state].nbAdditionalBits;
1415 U32 const ofBits = seqState->stateOffb.table[seqState->stateOffb.state].nbAdditionalBits;
1416 U32 const totalBits = llBits+mlBits+ofBits;
1417 U32 const llBase = seqState->stateLL.table[seqState->stateLL.state].baseValue;
1418 U32 const mlBase = seqState->stateML.table[seqState->stateML.state].baseValue;
1419 U32 const ofBase = seqState->stateOffb.table[seqState->stateOffb.state].baseValue;
1420
1421 /* sequence */
1422 { size_t offset;
1423 if (!ofBits)
1424 offset = 0;
1425 else {
1426 ZSTD_STATIC_ASSERT(ZSTD_lo_isLongOffset == 1);
1427 ZSTD_STATIC_ASSERT(LONG_OFFSETS_MAX_EXTRA_BITS_32 == 5);
1428 assert(ofBits <= MaxOff);
1429 if (MEM_32bits() && longOffsets) {
1430 U32 const extraBits = ofBits - MIN(ofBits, STREAM_ACCUMULATOR_MIN_32-1);
1431 offset = ofBase + (BIT_readBitsFast(&seqState->DStream, ofBits - extraBits) << extraBits);
1432 if (MEM_32bits() || extraBits) BIT_reloadDStream(&seqState->DStream);
1433 if (extraBits) offset += BIT_readBitsFast(&seqState->DStream, extraBits);
1434 } else {
1435 offset = ofBase + BIT_readBitsFast(&seqState->DStream, ofBits); /* <= (ZSTD_WINDOWLOG_MAX-1) bits */
1436 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream);
1437 }
1438 }
1439
1440 if (ofBits <= 1) {
1441 offset += (llBase==0);
1442 if (offset) {
1443 size_t temp = (offset==3) ? seqState->prevOffset[0] - 1 : seqState->prevOffset[offset];
1444 temp += !temp; /* 0 is not valid; input is corrupted; force offset to 1 */
1445 if (offset != 1) seqState->prevOffset[2] = seqState->prevOffset[1];
1446 seqState->prevOffset[1] = seqState->prevOffset[0];
1447 seqState->prevOffset[0] = offset = temp;
1448 } else {
1449 offset = seqState->prevOffset[0];
1450 }
1451 } else {
1452 seqState->prevOffset[2] = seqState->prevOffset[1];
1453 seqState->prevOffset[1] = seqState->prevOffset[0];
1454 seqState->prevOffset[0] = offset;
1455 }
1456 seq.offset = offset;
1457 }
1458
1459 seq.matchLength = mlBase + ((mlBits>0) ? BIT_readBitsFast(&seqState->DStream, mlBits) : 0); /* <= 16 bits */
1460 if (MEM_32bits() && (mlBits+llBits >= STREAM_ACCUMULATOR_MIN_32-LONG_OFFSETS_MAX_EXTRA_BITS_32))
1461 BIT_reloadDStream(&seqState->DStream);
1462 if (MEM_64bits() && (totalBits >= STREAM_ACCUMULATOR_MIN_64-(LLFSELog+MLFSELog+OffFSELog)))
1463 BIT_reloadDStream(&seqState->DStream);
    /* Verify that there are enough bits to read the rest of the data in 64-bit mode. */
1465 ZSTD_STATIC_ASSERT(16+LLFSELog+MLFSELog+OffFSELog < STREAM_ACCUMULATOR_MIN_64);
1466
1467 seq.litLength = llBase + ((llBits>0) ? BIT_readBitsFast(&seqState->DStream, llBits) : 0); /* <= 16 bits */
1468 if (MEM_32bits())
1469 BIT_reloadDStream(&seqState->DStream);
1470
1471 { size_t const pos = seqState->pos + seq.litLength;
1472 const BYTE* const matchBase = (seq.offset > pos) ? seqState->dictEnd : seqState->prefixStart;
1473 seq.match = matchBase + pos - seq.offset; /* note : this operation can overflow when seq.offset is really too large, which can only happen when input is corrupted.
1474 * No consequence though : no memory access will occur, overly large offset will be detected in ZSTD_execSequenceLong() */
1475 seqState->pos = pos + seq.matchLength;
1476 }
1477
1478 /* ANS state update */
1479 ZSTD_updateFseState(&seqState->stateLL, &seqState->DStream); /* <= 9 bits */
1480 ZSTD_updateFseState(&seqState->stateML, &seqState->DStream); /* <= 9 bits */
1481 if (MEM_32bits()) BIT_reloadDStream(&seqState->DStream); /* <= 18 bits */
1482 ZSTD_updateFseState(&seqState->stateOffb, &seqState->DStream); /* <= 8 bits */
1483
1484 return seq;
1485}
1486
1487FORCE_INLINE_TEMPLATE size_t
1488ZSTD_decompressSequencesLong_body(
1489 ZSTD_DCtx* dctx,
1490 void* dst, size_t maxDstSize,
1491 const void* seqStart, size_t seqSize, int nbSeq,
1492 const ZSTD_longOffset_e isLongOffset)
1493{
1494 const BYTE* ip = (const BYTE*)seqStart;
1495 const BYTE* const iend = ip + seqSize;
1496 BYTE* const ostart = (BYTE* const)dst;
1497 BYTE* const oend = ostart + maxDstSize;
1498 BYTE* op = ostart;
1499 const BYTE* litPtr = dctx->litPtr;
1500 const BYTE* const litEnd = litPtr + dctx->litSize;
1501 const BYTE* const prefixStart = (const BYTE*) (dctx->base);
1502 const BYTE* const dictStart = (const BYTE*) (dctx->vBase);
1503 const BYTE* const dictEnd = (const BYTE*) (dctx->dictEnd);
1504
1505 /* Regen sequences */
1506 if (nbSeq) {
1507#define STORED_SEQS 4
1508#define STOSEQ_MASK (STORED_SEQS-1)
1509#define ADVANCED_SEQS 4
1510 seq_t sequences[STORED_SEQS];
1511 int const seqAdvance = MIN(nbSeq, ADVANCED_SEQS);
1512 seqState_t seqState;
1513 int seqNb;
1514 dctx->fseEntropy = 1;
1515 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) seqState.prevOffset[i] = dctx->entropy.rep[i]; }
1516 seqState.prefixStart = prefixStart;
1517 seqState.pos = (size_t)(op-prefixStart);
1518 seqState.dictEnd = dictEnd;
1519 CHECK_E(BIT_initDStream(&seqState.DStream, ip, iend-ip), corruption_detected);
1520 ZSTD_initFseState(&seqState.stateLL, &seqState.DStream, dctx->LLTptr);
1521 ZSTD_initFseState(&seqState.stateOffb, &seqState.DStream, dctx->OFTptr);
1522 ZSTD_initFseState(&seqState.stateML, &seqState.DStream, dctx->MLTptr);
1523
1524 /* prepare in advance */
1525 for (seqNb=0; (BIT_reloadDStream(&seqState.DStream) <= BIT_DStream_completed) && (seqNb<seqAdvance); seqNb++) {
1526 sequences[seqNb] = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
1527 }
1528 if (seqNb<seqAdvance) return ERROR(corruption_detected);
1529
1530 /* decode and decompress */
1531 for ( ; (BIT_reloadDStream(&(seqState.DStream)) <= BIT_DStream_completed) && (seqNb<nbSeq) ; seqNb++) {
1532 seq_t const sequence = ZSTD_decodeSequenceLong(&seqState, isLongOffset);
1533 size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[(seqNb-ADVANCED_SEQS) & STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1534 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1535 PREFETCH(sequence.match); /* note : it's safe to invoke PREFETCH() on any memory address, including invalid ones */
1536 sequences[seqNb&STOSEQ_MASK] = sequence;
1537 op += oneSeqSize;
1538 }
1539 if (seqNb<nbSeq) return ERROR(corruption_detected);
1540
1541 /* finish queue */
1542 seqNb -= seqAdvance;
1543 for ( ; seqNb<nbSeq ; seqNb++) {
1544 size_t const oneSeqSize = ZSTD_execSequenceLong(op, oend, sequences[seqNb&STOSEQ_MASK], &litPtr, litEnd, prefixStart, dictStart, dictEnd);
1545 if (ZSTD_isError(oneSeqSize)) return oneSeqSize;
1546 op += oneSeqSize;
1547 }
1548
1549 /* save reps for next block */
1550 { U32 i; for (i=0; i<ZSTD_REP_NUM; i++) dctx->entropy.rep[i] = (U32)(seqState.prevOffset[i]); }
1551#undef STORED_SEQS
1552#undef STOSEQ_MASK
1553#undef ADVANCED_SEQS
1554 }
1555
1556 /* last literal segment */
1557 { size_t const lastLLSize = litEnd - litPtr;
1558 if (lastLLSize > (size_t)(oend-op)) return ERROR(dstSize_tooSmall);
1559 memcpy(op, litPtr, lastLLSize);
1560 op += lastLLSize;
1561 }
1562
1563 return op-ostart;
1564}
1565
1566static size_t
1567ZSTD_decompressSequencesLong_default(ZSTD_DCtx* dctx,
1568 void* dst, size_t maxDstSize,
1569 const void* seqStart, size_t seqSize, int nbSeq,
1570 const ZSTD_longOffset_e isLongOffset)
1571{
1572 return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1573}
1574
1575
1576
1577#if DYNAMIC_BMI2
1578
1579static TARGET_ATTRIBUTE("bmi2") size_t
1580ZSTD_decompressSequences_bmi2(ZSTD_DCtx* dctx,
1581 void* dst, size_t maxDstSize,
1582 const void* seqStart, size_t seqSize, int nbSeq,
1583 const ZSTD_longOffset_e isLongOffset)
1584{
1585 return ZSTD_decompressSequences_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1586}
1587
1588static TARGET_ATTRIBUTE("bmi2") size_t
1589ZSTD_decompressSequencesLong_bmi2(ZSTD_DCtx* dctx,
1590 void* dst, size_t maxDstSize,
1591 const void* seqStart, size_t seqSize, int nbSeq,
1592 const ZSTD_longOffset_e isLongOffset)
1593{
1594 return ZSTD_decompressSequencesLong_body(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1595}
1596
1597#endif
1598
1599typedef size_t (*ZSTD_decompressSequences_t)(
1600 ZSTD_DCtx *dctx, void *dst, size_t maxDstSize,
1601 const void *seqStart, size_t seqSize, int nbSeq,
1602 const ZSTD_longOffset_e isLongOffset);
1603
1604static size_t ZSTD_decompressSequences(ZSTD_DCtx* dctx, void* dst, size_t maxDstSize,
1605 const void* seqStart, size_t seqSize, int nbSeq,
1606 const ZSTD_longOffset_e isLongOffset)
1607{
1608 DEBUGLOG(5, "ZSTD_decompressSequences");
1609#if DYNAMIC_BMI2
1610 if (dctx->bmi2) {
1611 return ZSTD_decompressSequences_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1612 }
1613#endif
1614 return ZSTD_decompressSequences_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1615}
1616
1617static size_t ZSTD_decompressSequencesLong(ZSTD_DCtx* dctx,
1618 void* dst, size_t maxDstSize,
1619 const void* seqStart, size_t seqSize, int nbSeq,
1620 const ZSTD_longOffset_e isLongOffset)
1621{
1622 DEBUGLOG(5, "ZSTD_decompressSequencesLong");
1623#if DYNAMIC_BMI2
1624 if (dctx->bmi2) {
1625 return ZSTD_decompressSequencesLong_bmi2(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1626 }
1627#endif
1628 return ZSTD_decompressSequencesLong_default(dctx, dst, maxDstSize, seqStart, seqSize, nbSeq, isLongOffset);
1629}
1630
1631/* ZSTD_getLongOffsetsShare() :
1632 * condition : offTable must be valid
1633 * @return : "share" of long offsets (arbitrarily defined as > (1<<23))
1634 * compared to maximum possible of (1<<OffFSELog) */
1635static unsigned
1636ZSTD_getLongOffsetsShare(const ZSTD_seqSymbol* offTable)
1637{
1638 const void* ptr = offTable;
1639 U32 const tableLog = ((const ZSTD_seqSymbol_header*)ptr)[0].tableLog;
1640 const ZSTD_seqSymbol* table = offTable + 1;
1641 U32 const max = 1 << tableLog;
1642 U32 u, total = 0;
1643 DEBUGLOG(5, "ZSTD_getLongOffsetsShare: (tableLog=%u)", tableLog);
1644
1645 assert(max <= (1 << OffFSELog)); /* max not too large */
1646 for (u=0; u<max; u++) {
1647 if (table[u].nbAdditionalBits > 22) total += 1;
1648 }
1649
1650 assert(tableLog <= OffFSELog);
1651 total <<= (OffFSELog - tableLog); /* scale to OffFSELog */
1652
1653 return total;
1654}
1655
1656
1657static size_t ZSTD_decompressBlock_internal(ZSTD_DCtx* dctx,
1658 void* dst, size_t dstCapacity,
1659 const void* src, size_t srcSize, const int frame)
1660{ /* blockType == blockCompressed */
1661 const BYTE* ip = (const BYTE*)src;
1662 /* isLongOffset must be true if there are long offsets.
1663 * Offsets are long if they are larger than 2^STREAM_ACCUMULATOR_MIN.
1664 * We don't expect that to be the case in 64-bit mode.
1665 * In block mode, window size is not known, so we have to be conservative. (note: but it could be evaluated from current-lowLimit)
1666 */
1667 ZSTD_longOffset_e const isLongOffset = (ZSTD_longOffset_e)(MEM_32bits() && (!frame || dctx->fParams.windowSize > (1ULL << STREAM_ACCUMULATOR_MIN)));
1668 DEBUGLOG(5, "ZSTD_decompressBlock_internal (size : %u)", (U32)srcSize);
1669
1670 if (srcSize >= ZSTD_BLOCKSIZE_MAX) return ERROR(srcSize_wrong);
1671
1672 /* Decode literals section */
1673 { size_t const litCSize = ZSTD_decodeLiteralsBlock(dctx, src, srcSize);
1674 DEBUGLOG(5, "ZSTD_decodeLiteralsBlock : %u", (U32)litCSize);
1675 if (ZSTD_isError(litCSize)) return litCSize;
1676 ip += litCSize;
1677 srcSize -= litCSize;
1678 }
1679
1680 /* Build Decoding Tables */
1681 { int nbSeq;
1682 size_t const seqHSize = ZSTD_decodeSeqHeaders(dctx, &nbSeq, ip, srcSize);
1683 if (ZSTD_isError(seqHSize)) return seqHSize;
1684 ip += seqHSize;
1685 srcSize -= seqHSize;
1686
1687 if ( (!frame || dctx->fParams.windowSize > (1<<24))
1688 && (nbSeq>0) ) { /* could probably use a larger nbSeq limit */
1689 U32 const shareLongOffsets = ZSTD_getLongOffsetsShare(dctx->OFTptr);
1690 U32 const minShare = MEM_64bits() ? 7 : 20; /* heuristic values, correspond to 2.73% and 7.81% */
1691 if (shareLongOffsets >= minShare)
1692 return ZSTD_decompressSequencesLong(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
1693 }
1694
1695 return ZSTD_decompressSequences(dctx, dst, dstCapacity, ip, srcSize, nbSeq, isLongOffset);
1696 }
1697}
1698
1699
1700static void ZSTD_checkContinuity(ZSTD_DCtx* dctx, const void* dst)
1701{
1702 if (dst != dctx->previousDstEnd) { /* not contiguous */
1703 dctx->dictEnd = dctx->previousDstEnd;
1704 dctx->vBase = (const char*)dst - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
1705 dctx->base = dst;
1706 dctx->previousDstEnd = dst;
1707 }
1708}
1709
1710size_t ZSTD_decompressBlock(ZSTD_DCtx* dctx,
1711 void* dst, size_t dstCapacity,
1712 const void* src, size_t srcSize)
1713{
1714 size_t dSize;
1715 ZSTD_checkContinuity(dctx, dst);
1716 dSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 0);
1717 dctx->previousDstEnd = (char*)dst + dSize;
1718 return dSize;
1719}
1720
1721
1722/** ZSTD_insertBlock() :
1723 insert `src` block into `dctx` history. Useful to track uncompressed blocks. */
1724ZSTDLIB_API size_t ZSTD_insertBlock(ZSTD_DCtx* dctx, const void* blockStart, size_t blockSize)
1725{
1726 ZSTD_checkContinuity(dctx, blockStart);
1727 dctx->previousDstEnd = (const char*)blockStart + blockSize;
1728 return blockSize;
1729}
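/* Illustrative sketch (not part of the library) : when driving the block-level API directly,
 * the caller copies raw (uncompressed) blocks itself, then declares them with ZSTD_insertBlock()
 * so that subsequent compressed blocks may reference that data. Buffer names below are assumed :
 *
 *     memcpy(op, blockSrc, blockSize);            // caller handles the raw block copy itself
 *     ZSTD_insertBlock(dctx, op, blockSize);      // register it into dctx history
 *     op += blockSize;
 */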
1730
1731
1732static size_t ZSTD_generateNxBytes(void* dst, size_t dstCapacity, BYTE byte, size_t length)
1733{
1734 if (length > dstCapacity) return ERROR(dstSize_tooSmall);
1735 memset(dst, byte, length);
1736 return length;
1737}
1738
1739/** ZSTD_findFrameCompressedSize() :
1740 * compatible with legacy mode
1741 * `src` must point to the start of a ZSTD frame, ZSTD legacy frame, or skippable frame
1742 *  `srcSize` must be at least as large as the frame it contains
1743 * @return : the compressed size of the frame starting at `src` */
1744size_t ZSTD_findFrameCompressedSize(const void *src, size_t srcSize)
1745{
1746#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
1747 if (ZSTD_isLegacy(src, srcSize))
1748 return ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
1749#endif
1750 if ( (srcSize >= ZSTD_skippableHeaderSize)
1751 && (MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START ) {
1752 return ZSTD_skippableHeaderSize + MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize);
1753 } else {
1754 const BYTE* ip = (const BYTE*)src;
1755 const BYTE* const ipstart = ip;
1756 size_t remainingSize = srcSize;
1757 ZSTD_frameHeader zfh;
1758
1759 /* Extract Frame Header */
1760 { size_t const ret = ZSTD_getFrameHeader(&zfh, src, srcSize);
1761 if (ZSTD_isError(ret)) return ret;
1762 if (ret > 0) return ERROR(srcSize_wrong);
1763 }
1764
1765 ip += zfh.headerSize;
1766 remainingSize -= zfh.headerSize;
1767
1768 /* Loop on each block */
1769 while (1) {
1770 blockProperties_t blockProperties;
1771 size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
1772 if (ZSTD_isError(cBlockSize)) return cBlockSize;
1773
1774 if (ZSTD_blockHeaderSize + cBlockSize > remainingSize)
1775 return ERROR(srcSize_wrong);
1776
1777 ip += ZSTD_blockHeaderSize + cBlockSize;
1778 remainingSize -= ZSTD_blockHeaderSize + cBlockSize;
1779
1780 if (blockProperties.lastBlock) break;
1781 }
1782
1783 if (zfh.checksumFlag) { /* Final frame content checksum */
1784 if (remainingSize < 4) return ERROR(srcSize_wrong);
1785 ip += 4;
1786 remainingSize -= 4;
1787 }
1788
1789 return ip - ipstart;
1790 }
1791}
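/* Illustrative sketch (not part of the library) : ZSTD_findFrameCompressedSize() can be used to
 * walk over several frames concatenated in a single buffer. `src`/`srcSize` below are assumed
 * to describe such a buffer :
 *
 *     const char* p = (const char*)src;
 *     size_t remaining = srcSize;
 *     unsigned nbFrames = 0;
 *     while (remaining != 0) {
 *         size_t const frameSize = ZSTD_findFrameCompressedSize(p, remaining);
 *         if (ZSTD_isError(frameSize)) break;     // error handling left out
 *         p += frameSize; remaining -= frameSize; nbFrames++;
 *     }
 */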
1792
1793/*! ZSTD_decompressFrame() :
1794* @dctx must be properly initialized */
1795static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx,
1796 void* dst, size_t dstCapacity,
1797 const void** srcPtr, size_t *srcSizePtr)
1798{
1799 const BYTE* ip = (const BYTE*)(*srcPtr);
1800 BYTE* const ostart = (BYTE* const)dst;
1801 BYTE* const oend = ostart + dstCapacity;
1802 BYTE* op = ostart;
1803 size_t remainingSize = *srcSizePtr;
1804
1805 /* check */
1806 if (remainingSize < ZSTD_frameHeaderSize_min+ZSTD_blockHeaderSize)
1807 return ERROR(srcSize_wrong);
1808
1809 /* Frame Header */
1810 { size_t const frameHeaderSize = ZSTD_frameHeaderSize(ip, ZSTD_frameHeaderSize_prefix);
1811 if (ZSTD_isError(frameHeaderSize)) return frameHeaderSize;
1812 if (remainingSize < frameHeaderSize+ZSTD_blockHeaderSize)
1813 return ERROR(srcSize_wrong);
1814 CHECK_F( ZSTD_decodeFrameHeader(dctx, ip, frameHeaderSize) );
1815 ip += frameHeaderSize; remainingSize -= frameHeaderSize;
1816 }
1817
1818 /* Loop on each block */
1819 while (1) {
1820 size_t decodedSize;
1821 blockProperties_t blockProperties;
1822 size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSize, &blockProperties);
1823 if (ZSTD_isError(cBlockSize)) return cBlockSize;
1824
1825 ip += ZSTD_blockHeaderSize;
1826 remainingSize -= ZSTD_blockHeaderSize;
1827 if (cBlockSize > remainingSize) return ERROR(srcSize_wrong);
1828
1829 switch(blockProperties.blockType)
1830 {
1831 case bt_compressed:
1832 decodedSize = ZSTD_decompressBlock_internal(dctx, op, oend-op, ip, cBlockSize, /* frame */ 1);
1833 break;
1834 case bt_raw :
1835 decodedSize = ZSTD_copyRawBlock(op, oend-op, ip, cBlockSize);
1836 break;
1837 case bt_rle :
1838 decodedSize = ZSTD_generateNxBytes(op, oend-op, *ip, blockProperties.origSize);
1839 break;
1840 case bt_reserved :
1841 default:
1842 return ERROR(corruption_detected);
1843 }
1844
1845 if (ZSTD_isError(decodedSize)) return decodedSize;
1846 if (dctx->fParams.checksumFlag)
1847 XXH64_update(&dctx->xxhState, op, decodedSize);
1848 op += decodedSize;
1849 ip += cBlockSize;
1850 remainingSize -= cBlockSize;
1851 if (blockProperties.lastBlock) break;
1852 }
1853
1854 if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
1855 if ((U64)(op-ostart) != dctx->fParams.frameContentSize) {
1856 return ERROR(corruption_detected);
1857 } }
1858 if (dctx->fParams.checksumFlag) { /* Frame content checksum verification */
1859 U32 const checkCalc = (U32)XXH64_digest(&dctx->xxhState);
1860 U32 checkRead;
1861 if (remainingSize<4) return ERROR(checksum_wrong);
1862 checkRead = MEM_readLE32(ip);
1863 if (checkRead != checkCalc) return ERROR(checksum_wrong);
1864 ip += 4;
1865 remainingSize -= 4;
1866 }
1867
1868 /* Allow caller to get size read */
1869 *srcPtr = ip;
1870 *srcSizePtr = remainingSize;
1871 return op-ostart;
1872}
1873
1874static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict);
1875static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict);
1876
1877static size_t ZSTD_decompressMultiFrame(ZSTD_DCtx* dctx,
1878 void* dst, size_t dstCapacity,
1879 const void* src, size_t srcSize,
1880 const void* dict, size_t dictSize,
1881 const ZSTD_DDict* ddict)
1882{
1883 void* const dststart = dst;
1884 assert(dict==NULL || ddict==NULL); /* either dict or ddict set, not both */
1885
1886 if (ddict) {
1887 dict = ZSTD_DDictDictContent(ddict);
1888 dictSize = ZSTD_DDictDictSize(ddict);
1889 }
1890
1891 while (srcSize >= ZSTD_frameHeaderSize_prefix) {
1892 U32 magicNumber;
1893
1894#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT >= 1)
1895 if (ZSTD_isLegacy(src, srcSize)) {
1896 size_t decodedSize;
1897 size_t const frameSize = ZSTD_findFrameCompressedSizeLegacy(src, srcSize);
1898 if (ZSTD_isError(frameSize)) return frameSize;
1899 /* legacy support is not compatible with static dctx */
1900 if (dctx->staticSize) return ERROR(memory_allocation);
1901
1902 decodedSize = ZSTD_decompressLegacy(dst, dstCapacity, src, frameSize, dict, dictSize);
1903
1904 dst = (BYTE*)dst + decodedSize;
1905 dstCapacity -= decodedSize;
1906
1907 src = (const BYTE*)src + frameSize;
1908 srcSize -= frameSize;
1909
1910 continue;
1911 }
1912#endif
1913
1914 magicNumber = MEM_readLE32(src);
1915 DEBUGLOG(4, "reading magic number %08X (expecting %08X)",
1916 (U32)magicNumber, (U32)ZSTD_MAGICNUMBER);
1917 if (magicNumber != ZSTD_MAGICNUMBER) {
1918 if ((magicNumber & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) {
1919 size_t skippableSize;
1920 if (srcSize < ZSTD_skippableHeaderSize)
1921 return ERROR(srcSize_wrong);
1922 skippableSize = MEM_readLE32((const BYTE*)src + ZSTD_frameIdSize)
1923 + ZSTD_skippableHeaderSize;
1924 if (srcSize < skippableSize) return ERROR(srcSize_wrong);
1925
1926 src = (const BYTE *)src + skippableSize;
1927 srcSize -= skippableSize;
1928 continue;
1929 }
1930 return ERROR(prefix_unknown);
1931 }
1932
1933 if (ddict) {
1934 /* we were called from ZSTD_decompress_usingDDict */
1935 CHECK_F(ZSTD_decompressBegin_usingDDict(dctx, ddict));
1936 } else {
1937            /* this initializes correctly even when dict == NULL (no dictionary),
1938             * so use this path in all cases except when a ddict is provided */
1939 CHECK_F(ZSTD_decompressBegin_usingDict(dctx, dict, dictSize));
1940 }
1941 ZSTD_checkContinuity(dctx, dst);
1942
1943 { const size_t res = ZSTD_decompressFrame(dctx, dst, dstCapacity,
1944 &src, &srcSize);
1945 if (ZSTD_isError(res)) return res;
1946 /* no need to bound check, ZSTD_decompressFrame already has */
1947 dst = (BYTE*)dst + res;
1948 dstCapacity -= res;
1949 }
1950 } /* while (srcSize >= ZSTD_frameHeaderSize_prefix) */
1951
1952 if (srcSize) return ERROR(srcSize_wrong); /* input not entirely consumed */
1953
1954 return (BYTE*)dst - (BYTE*)dststart;
1955}
1956
1957size_t ZSTD_decompress_usingDict(ZSTD_DCtx* dctx,
1958 void* dst, size_t dstCapacity,
1959 const void* src, size_t srcSize,
1960 const void* dict, size_t dictSize)
1961{
1962 return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize, dict, dictSize, NULL);
1963}
1964
1965
1966size_t ZSTD_decompressDCtx(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1967{
1968 return ZSTD_decompress_usingDict(dctx, dst, dstCapacity, src, srcSize, NULL, 0);
1969}
1970
1971
1972size_t ZSTD_decompress(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
1973{
1974#if defined(ZSTD_HEAPMODE) && (ZSTD_HEAPMODE>=1)
1975 size_t regenSize;
1976 ZSTD_DCtx* const dctx = ZSTD_createDCtx();
1977 if (dctx==NULL) return ERROR(memory_allocation);
1978 regenSize = ZSTD_decompressDCtx(dctx, dst, dstCapacity, src, srcSize);
1979 ZSTD_freeDCtx(dctx);
1980 return regenSize;
1981#else /* stack mode */
1982 ZSTD_DCtx dctx;
1983 return ZSTD_decompressDCtx(&dctx, dst, dstCapacity, src, srcSize);
1984#endif
1985}
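/* Illustrative sketch (not part of the library) : minimal one-shot usage. `cBuf`/`cSize` are
 * assumed to hold exactly one complete frame, and `rBuf`/`rCapacity` to describe a destination
 * buffer large enough for the regenerated data (e.g. sized from ZSTD_getFrameContentSize()) :
 *
 *     size_t const rSize = ZSTD_decompress(rBuf, rCapacity, cBuf, cSize);
 *     if (ZSTD_isError(rSize)) {
 *         fprintf(stderr, "decompression error : %s\n", ZSTD_getErrorName(rSize));  // needs <stdio.h>
 *     }
 */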
1986
1987
1988/*-**************************************
1989* Advanced Streaming Decompression API
1990* Bufferless and synchronous
1991****************************************/
1992size_t ZSTD_nextSrcSizeToDecompress(ZSTD_DCtx* dctx) { return dctx->expected; }
1993
1994ZSTD_nextInputType_e ZSTD_nextInputType(ZSTD_DCtx* dctx) {
1995 switch(dctx->stage)
1996 {
1997 default: /* should not happen */
1998 assert(0);
1999 case ZSTDds_getFrameHeaderSize:
2000 case ZSTDds_decodeFrameHeader:
2001 return ZSTDnit_frameHeader;
2002 case ZSTDds_decodeBlockHeader:
2003 return ZSTDnit_blockHeader;
2004 case ZSTDds_decompressBlock:
2005 return ZSTDnit_block;
2006 case ZSTDds_decompressLastBlock:
2007 return ZSTDnit_lastBlock;
2008 case ZSTDds_checkChecksum:
2009 return ZSTDnit_checksum;
2010 case ZSTDds_decodeSkippableHeader:
2011 case ZSTDds_skipFrame:
2012 return ZSTDnit_skippableFrame;
2013 }
2014}
2015
2016static int ZSTD_isSkipFrame(ZSTD_DCtx* dctx) { return dctx->stage == ZSTDds_skipFrame; }
2017
2018/** ZSTD_decompressContinue() :
2019 * srcSize : must be the exact nb of bytes expected (see ZSTD_nextSrcSizeToDecompress())
2020 * @return : nb of bytes generated into `dst` (necessarily <= `dstCapacity`)
2021 * or an error code, which can be tested using ZSTD_isError() */
2022size_t ZSTD_decompressContinue(ZSTD_DCtx* dctx, void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2023{
2024 DEBUGLOG(5, "ZSTD_decompressContinue (srcSize:%u)", (U32)srcSize);
2025 /* Sanity check */
2026 if (srcSize != dctx->expected) return ERROR(srcSize_wrong); /* not allowed */
2027 if (dstCapacity) ZSTD_checkContinuity(dctx, dst);
2028
2029 switch (dctx->stage)
2030 {
2031 case ZSTDds_getFrameHeaderSize :
2032 assert(src != NULL);
2033 if (dctx->format == ZSTD_f_zstd1) { /* allows header */
2034 assert(srcSize >= ZSTD_frameIdSize); /* to read skippable magic number */
2035 if ((MEM_readLE32(src) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
2036 memcpy(dctx->headerBuffer, src, srcSize);
2037 dctx->expected = ZSTD_skippableHeaderSize - srcSize; /* remaining to load to get full skippable frame header */
2038 dctx->stage = ZSTDds_decodeSkippableHeader;
2039 return 0;
2040 } }
2041 dctx->headerSize = ZSTD_frameHeaderSize_internal(src, srcSize, dctx->format);
2042 if (ZSTD_isError(dctx->headerSize)) return dctx->headerSize;
2043 memcpy(dctx->headerBuffer, src, srcSize);
2044 dctx->expected = dctx->headerSize - srcSize;
2045 dctx->stage = ZSTDds_decodeFrameHeader;
2046 return 0;
2047
2048 case ZSTDds_decodeFrameHeader:
2049 assert(src != NULL);
2050 memcpy(dctx->headerBuffer + (dctx->headerSize - srcSize), src, srcSize);
2051 CHECK_F(ZSTD_decodeFrameHeader(dctx, dctx->headerBuffer, dctx->headerSize));
2052 dctx->expected = ZSTD_blockHeaderSize;
2053 dctx->stage = ZSTDds_decodeBlockHeader;
2054 return 0;
2055
2056 case ZSTDds_decodeBlockHeader:
2057 { blockProperties_t bp;
2058 size_t const cBlockSize = ZSTD_getcBlockSize(src, ZSTD_blockHeaderSize, &bp);
2059 if (ZSTD_isError(cBlockSize)) return cBlockSize;
2060 dctx->expected = cBlockSize;
2061 dctx->bType = bp.blockType;
2062 dctx->rleSize = bp.origSize;
2063 if (cBlockSize) {
2064 dctx->stage = bp.lastBlock ? ZSTDds_decompressLastBlock : ZSTDds_decompressBlock;
2065 return 0;
2066 }
2067 /* empty block */
2068 if (bp.lastBlock) {
2069 if (dctx->fParams.checksumFlag) {
2070 dctx->expected = 4;
2071 dctx->stage = ZSTDds_checkChecksum;
2072 } else {
2073 dctx->expected = 0; /* end of frame */
2074 dctx->stage = ZSTDds_getFrameHeaderSize;
2075 }
2076 } else {
2077 dctx->expected = ZSTD_blockHeaderSize; /* jump to next header */
2078 dctx->stage = ZSTDds_decodeBlockHeader;
2079 }
2080 return 0;
2081 }
2082
2083 case ZSTDds_decompressLastBlock:
2084 case ZSTDds_decompressBlock:
2085 DEBUGLOG(5, "ZSTD_decompressContinue: case ZSTDds_decompressBlock");
2086 { size_t rSize;
2087 switch(dctx->bType)
2088 {
2089 case bt_compressed:
2090 DEBUGLOG(5, "ZSTD_decompressContinue: case bt_compressed");
2091 rSize = ZSTD_decompressBlock_internal(dctx, dst, dstCapacity, src, srcSize, /* frame */ 1);
2092 break;
2093 case bt_raw :
2094 rSize = ZSTD_copyRawBlock(dst, dstCapacity, src, srcSize);
2095 break;
2096 case bt_rle :
2097 rSize = ZSTD_setRleBlock(dst, dstCapacity, src, srcSize, dctx->rleSize);
2098 break;
2099 case bt_reserved : /* should never happen */
2100 default:
2101 return ERROR(corruption_detected);
2102 }
2103 if (ZSTD_isError(rSize)) return rSize;
2104 DEBUGLOG(5, "ZSTD_decompressContinue: decoded size from block : %u", (U32)rSize);
2105 dctx->decodedSize += rSize;
2106 if (dctx->fParams.checksumFlag) XXH64_update(&dctx->xxhState, dst, rSize);
2107
2108 if (dctx->stage == ZSTDds_decompressLastBlock) { /* end of frame */
2109 DEBUGLOG(4, "ZSTD_decompressContinue: decoded size from frame : %u", (U32)dctx->decodedSize);
2110 if (dctx->fParams.frameContentSize != ZSTD_CONTENTSIZE_UNKNOWN) {
2111 if (dctx->decodedSize != dctx->fParams.frameContentSize) {
2112 return ERROR(corruption_detected);
2113 } }
2114 if (dctx->fParams.checksumFlag) { /* another round for frame checksum */
2115 dctx->expected = 4;
2116 dctx->stage = ZSTDds_checkChecksum;
2117 } else {
2118 dctx->expected = 0; /* ends here */
2119 dctx->stage = ZSTDds_getFrameHeaderSize;
2120 }
2121 } else {
2122 dctx->stage = ZSTDds_decodeBlockHeader;
2123 dctx->expected = ZSTD_blockHeaderSize;
2124 dctx->previousDstEnd = (char*)dst + rSize;
2125 }
2126 return rSize;
2127 }
2128
2129 case ZSTDds_checkChecksum:
2130 assert(srcSize == 4); /* guaranteed by dctx->expected */
2131 { U32 const h32 = (U32)XXH64_digest(&dctx->xxhState);
2132 U32 const check32 = MEM_readLE32(src);
2133 DEBUGLOG(4, "ZSTD_decompressContinue: checksum : calculated %08X :: %08X read", h32, check32);
2134 if (check32 != h32) return ERROR(checksum_wrong);
2135 dctx->expected = 0;
2136 dctx->stage = ZSTDds_getFrameHeaderSize;
2137 return 0;
2138 }
2139
2140 case ZSTDds_decodeSkippableHeader:
2141 assert(src != NULL);
2142 assert(srcSize <= ZSTD_skippableHeaderSize);
2143 memcpy(dctx->headerBuffer + (ZSTD_skippableHeaderSize - srcSize), src, srcSize); /* complete skippable header */
2144 dctx->expected = MEM_readLE32(dctx->headerBuffer + ZSTD_frameIdSize); /* note : dctx->expected can grow seriously large, beyond local buffer size */
2145 dctx->stage = ZSTDds_skipFrame;
2146 return 0;
2147
2148 case ZSTDds_skipFrame:
2149 dctx->expected = 0;
2150 dctx->stage = ZSTDds_getFrameHeaderSize;
2151 return 0;
2152
2153 default:
2154 return ERROR(GENERIC); /* impossible */
2155 }
2156}
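/* Illustrative sketch (not part of the library) : typical driving loop for the bufferless API.
 * `src`/`srcSize` are assumed to hold one complete frame, and `dst`/`dstCapacity` to be large
 * enough for the regenerated content (contiguity rules still apply, see ZSTD_checkContinuity) :
 *
 *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *     const char* ip = (const char*)src;
 *     char* op = (char*)dst;
 *     size_t toRead;
 *     ZSTD_decompressBegin(dctx);
 *     while ((toRead = ZSTD_nextSrcSizeToDecompress(dctx)) != 0) {
 *         size_t const genSize = ZSTD_decompressContinue(dctx,
 *                                     op, dstCapacity - (size_t)(op - (char*)dst),
 *                                     ip, toRead);
 *         if (ZSTD_isError(genSize)) break;       // error handling left out
 *         ip += toRead; op += genSize;
 *     }
 *     ZSTD_freeDCtx(dctx);
 */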
2157
2158
2159static size_t ZSTD_refDictContent(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
2160{
2161 dctx->dictEnd = dctx->previousDstEnd;
2162 dctx->vBase = (const char*)dict - ((const char*)(dctx->previousDstEnd) - (const char*)(dctx->base));
2163 dctx->base = dict;
2164 dctx->previousDstEnd = (const char*)dict + dictSize;
2165 return 0;
2166}
2167
2168/* ZSTD_loadEntropy() :
2169 * dict : must point at the beginning of a valid zstd dictionary
2170 * @return : size of entropy tables read */
2171static size_t ZSTD_loadEntropy(ZSTD_entropyDTables_t* entropy, const void* const dict, size_t const dictSize)
2172{
2173 const BYTE* dictPtr = (const BYTE*)dict;
2174 const BYTE* const dictEnd = dictPtr + dictSize;
2175
2176 if (dictSize <= 8) return ERROR(dictionary_corrupted);
2177 dictPtr += 8; /* skip header = magic + dictID */
2178
2179
2180 { size_t const hSize = HUF_readDTableX4_wksp(
2181 entropy->hufTable, dictPtr, dictEnd - dictPtr,
2182 entropy->workspace, sizeof(entropy->workspace));
2183 if (HUF_isError(hSize)) return ERROR(dictionary_corrupted);
2184 dictPtr += hSize;
2185 }
2186
2187 { short offcodeNCount[MaxOff+1];
2188 U32 offcodeMaxValue = MaxOff, offcodeLog;
2189 size_t const offcodeHeaderSize = FSE_readNCount(offcodeNCount, &offcodeMaxValue, &offcodeLog, dictPtr, dictEnd-dictPtr);
2190 if (FSE_isError(offcodeHeaderSize)) return ERROR(dictionary_corrupted);
2191 if (offcodeMaxValue > MaxOff) return ERROR(dictionary_corrupted);
2192 if (offcodeLog > OffFSELog) return ERROR(dictionary_corrupted);
2193 ZSTD_buildFSETable(entropy->OFTable,
2194 offcodeNCount, offcodeMaxValue,
2195 OF_base, OF_bits,
2196 offcodeLog);
2197 dictPtr += offcodeHeaderSize;
2198 }
2199
2200 { short matchlengthNCount[MaxML+1];
2201 unsigned matchlengthMaxValue = MaxML, matchlengthLog;
2202 size_t const matchlengthHeaderSize = FSE_readNCount(matchlengthNCount, &matchlengthMaxValue, &matchlengthLog, dictPtr, dictEnd-dictPtr);
2203 if (FSE_isError(matchlengthHeaderSize)) return ERROR(dictionary_corrupted);
2204 if (matchlengthMaxValue > MaxML) return ERROR(dictionary_corrupted);
2205 if (matchlengthLog > MLFSELog) return ERROR(dictionary_corrupted);
2206 ZSTD_buildFSETable(entropy->MLTable,
2207 matchlengthNCount, matchlengthMaxValue,
2208 ML_base, ML_bits,
2209 matchlengthLog);
2210 dictPtr += matchlengthHeaderSize;
2211 }
2212
2213 { short litlengthNCount[MaxLL+1];
2214 unsigned litlengthMaxValue = MaxLL, litlengthLog;
2215 size_t const litlengthHeaderSize = FSE_readNCount(litlengthNCount, &litlengthMaxValue, &litlengthLog, dictPtr, dictEnd-dictPtr);
2216 if (FSE_isError(litlengthHeaderSize)) return ERROR(dictionary_corrupted);
2217 if (litlengthMaxValue > MaxLL) return ERROR(dictionary_corrupted);
2218 if (litlengthLog > LLFSELog) return ERROR(dictionary_corrupted);
2219 ZSTD_buildFSETable(entropy->LLTable,
2220 litlengthNCount, litlengthMaxValue,
2221 LL_base, LL_bits,
2222 litlengthLog);
2223 dictPtr += litlengthHeaderSize;
2224 }
2225
2226 if (dictPtr+12 > dictEnd) return ERROR(dictionary_corrupted);
2227 { int i;
2228 size_t const dictContentSize = (size_t)(dictEnd - (dictPtr+12));
2229 for (i=0; i<3; i++) {
2230 U32 const rep = MEM_readLE32(dictPtr); dictPtr += 4;
2231 if (rep==0 || rep >= dictContentSize) return ERROR(dictionary_corrupted);
2232 entropy->rep[i] = rep;
2233 } }
2234
2235 return dictPtr - (const BYTE*)dict;
2236}
2237
2238static size_t ZSTD_decompress_insertDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
2239{
2240 if (dictSize < 8) return ZSTD_refDictContent(dctx, dict, dictSize);
2241 { U32 const magic = MEM_readLE32(dict);
2242 if (magic != ZSTD_MAGIC_DICTIONARY) {
2243 return ZSTD_refDictContent(dctx, dict, dictSize); /* pure content mode */
2244 } }
2245 dctx->dictID = MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
2246
2247 /* load entropy tables */
2248 { size_t const eSize = ZSTD_loadEntropy(&dctx->entropy, dict, dictSize);
2249 if (ZSTD_isError(eSize)) return ERROR(dictionary_corrupted);
2250 dict = (const char*)dict + eSize;
2251 dictSize -= eSize;
2252 }
2253 dctx->litEntropy = dctx->fseEntropy = 1;
2254
2255 /* reference dictionary content */
2256 return ZSTD_refDictContent(dctx, dict, dictSize);
2257}
2258
2259/* Note : this function cannot fail */
2260size_t ZSTD_decompressBegin(ZSTD_DCtx* dctx)
2261{
2262 assert(dctx != NULL);
2263 dctx->expected = ZSTD_startingInputLength(dctx->format); /* dctx->format must be properly set */
2264 dctx->stage = ZSTDds_getFrameHeaderSize;
2265 dctx->decodedSize = 0;
2266 dctx->previousDstEnd = NULL;
2267 dctx->base = NULL;
2268 dctx->vBase = NULL;
2269 dctx->dictEnd = NULL;
2270 dctx->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
2271 dctx->litEntropy = dctx->fseEntropy = 0;
2272 dctx->dictID = 0;
2273 ZSTD_STATIC_ASSERT(sizeof(dctx->entropy.rep) == sizeof(repStartValue));
2274 memcpy(dctx->entropy.rep, repStartValue, sizeof(repStartValue)); /* initial repcodes */
2275 dctx->LLTptr = dctx->entropy.LLTable;
2276 dctx->MLTptr = dctx->entropy.MLTable;
2277 dctx->OFTptr = dctx->entropy.OFTable;
2278 dctx->HUFptr = dctx->entropy.hufTable;
2279 return 0;
2280}
2281
2282size_t ZSTD_decompressBegin_usingDict(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
2283{
2284 CHECK_F( ZSTD_decompressBegin(dctx) );
2285 if (dict && dictSize)
2286 CHECK_E(ZSTD_decompress_insertDictionary(dctx, dict, dictSize), dictionary_corrupted);
2287 return 0;
2288}
2289
2290
2291/* ====== ZSTD_DDict ====== */
2292
2293struct ZSTD_DDict_s {
2294 void* dictBuffer;
2295 const void* dictContent;
2296 size_t dictSize;
2297 ZSTD_entropyDTables_t entropy;
2298 U32 dictID;
2299 U32 entropyPresent;
2300 ZSTD_customMem cMem;
2301}; /* typedef'd to ZSTD_DDict within "zstd.h" */
2302
2303static const void* ZSTD_DDictDictContent(const ZSTD_DDict* ddict)
2304{
2305 return ddict->dictContent;
2306}
2307
2308static size_t ZSTD_DDictDictSize(const ZSTD_DDict* ddict)
2309{
2310 return ddict->dictSize;
2311}
2312
2313size_t ZSTD_decompressBegin_usingDDict(ZSTD_DCtx* dstDCtx, const ZSTD_DDict* ddict)
2314{
2315 CHECK_F( ZSTD_decompressBegin(dstDCtx) );
2316 if (ddict) { /* support begin on NULL */
2317 dstDCtx->dictID = ddict->dictID;
2318 dstDCtx->base = ddict->dictContent;
2319 dstDCtx->vBase = ddict->dictContent;
2320 dstDCtx->dictEnd = (const BYTE*)ddict->dictContent + ddict->dictSize;
2321 dstDCtx->previousDstEnd = dstDCtx->dictEnd;
2322 if (ddict->entropyPresent) {
2323 dstDCtx->litEntropy = 1;
2324 dstDCtx->fseEntropy = 1;
2325 dstDCtx->LLTptr = ddict->entropy.LLTable;
2326 dstDCtx->MLTptr = ddict->entropy.MLTable;
2327 dstDCtx->OFTptr = ddict->entropy.OFTable;
2328 dstDCtx->HUFptr = ddict->entropy.hufTable;
2329 dstDCtx->entropy.rep[0] = ddict->entropy.rep[0];
2330 dstDCtx->entropy.rep[1] = ddict->entropy.rep[1];
2331 dstDCtx->entropy.rep[2] = ddict->entropy.rep[2];
2332 } else {
2333 dstDCtx->litEntropy = 0;
2334 dstDCtx->fseEntropy = 0;
2335 }
2336 }
2337 return 0;
2338}
2339
2340static size_t ZSTD_loadEntropy_inDDict(ZSTD_DDict* ddict, ZSTD_dictContentType_e dictContentType)
2341{
2342 ddict->dictID = 0;
2343 ddict->entropyPresent = 0;
2344 if (dictContentType == ZSTD_dct_rawContent) return 0;
2345
2346 if (ddict->dictSize < 8) {
2347 if (dictContentType == ZSTD_dct_fullDict)
2348 return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
2349 return 0; /* pure content mode */
2350 }
2351 { U32 const magic = MEM_readLE32(ddict->dictContent);
2352 if (magic != ZSTD_MAGIC_DICTIONARY) {
2353 if (dictContentType == ZSTD_dct_fullDict)
2354 return ERROR(dictionary_corrupted); /* only accept specified dictionaries */
2355 return 0; /* pure content mode */
2356 }
2357 }
2358 ddict->dictID = MEM_readLE32((const char*)ddict->dictContent + ZSTD_frameIdSize);
2359
2360 /* load entropy tables */
2361 CHECK_E( ZSTD_loadEntropy(&ddict->entropy, ddict->dictContent, ddict->dictSize), dictionary_corrupted );
2362 ddict->entropyPresent = 1;
2363 return 0;
2364}
2365
2366
2367static size_t ZSTD_initDDict_internal(ZSTD_DDict* ddict,
2368 const void* dict, size_t dictSize,
2369 ZSTD_dictLoadMethod_e dictLoadMethod,
2370 ZSTD_dictContentType_e dictContentType)
2371{
2372 if ((dictLoadMethod == ZSTD_dlm_byRef) || (!dict) || (!dictSize)) {
2373 ddict->dictBuffer = NULL;
2374 ddict->dictContent = dict;
2375 } else {
2376 void* const internalBuffer = ZSTD_malloc(dictSize, ddict->cMem);
2377 ddict->dictBuffer = internalBuffer;
2378 ddict->dictContent = internalBuffer;
2379 if (!internalBuffer) return ERROR(memory_allocation);
2380 memcpy(internalBuffer, dict, dictSize);
2381 }
2382 ddict->dictSize = dictSize;
2383 ddict->entropy.hufTable[0] = (HUF_DTable)((HufLog)*0x1000001); /* cover both little and big endian */
2384
2385 /* parse dictionary content */
2386 CHECK_F( ZSTD_loadEntropy_inDDict(ddict, dictContentType) );
2387
2388 return 0;
2389}
2390
2391ZSTD_DDict* ZSTD_createDDict_advanced(const void* dict, size_t dictSize,
2392 ZSTD_dictLoadMethod_e dictLoadMethod,
2393 ZSTD_dictContentType_e dictContentType,
2394 ZSTD_customMem customMem)
2395{
2396 if (!customMem.customAlloc ^ !customMem.customFree) return NULL;
2397
2398 { ZSTD_DDict* const ddict = (ZSTD_DDict*) ZSTD_malloc(sizeof(ZSTD_DDict), customMem);
2399 if (!ddict) return NULL;
2400 ddict->cMem = customMem;
2401
2402 if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, dictLoadMethod, dictContentType) )) {
2403 ZSTD_freeDDict(ddict);
2404 return NULL;
2405 }
2406
2407 return ddict;
2408 }
2409}
2410
2411/*! ZSTD_createDDict() :
2412* Create a digested dictionary, to start decompression without startup delay.
2413* `dict` content is copied inside DDict.
2414* Consequently, `dict` can be released after `ZSTD_DDict` creation */
2415ZSTD_DDict* ZSTD_createDDict(const void* dict, size_t dictSize)
2416{
2417 ZSTD_customMem const allocator = { NULL, NULL, NULL };
2418 return ZSTD_createDDict_advanced(dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto, allocator);
2419}
2420
2421/*! ZSTD_createDDict_byReference() :
2422 * Create a digested dictionary, to start decompression without startup delay.
2423 * Dictionary content is simply referenced, it will be accessed during decompression.
2424 * Warning : dictBuffer must outlive DDict (DDict must be freed before dictBuffer) */
2425ZSTD_DDict* ZSTD_createDDict_byReference(const void* dictBuffer, size_t dictSize)
2426{
2427 ZSTD_customMem const allocator = { NULL, NULL, NULL };
2428 return ZSTD_createDDict_advanced(dictBuffer, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto, allocator);
2429}
2430
2431
2432const ZSTD_DDict* ZSTD_initStaticDDict(
2433 void* workspace, size_t workspaceSize,
2434 const void* dict, size_t dictSize,
2435 ZSTD_dictLoadMethod_e dictLoadMethod,
2436 ZSTD_dictContentType_e dictContentType)
2437{
2438 size_t const neededSpace =
2439 sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
2440 ZSTD_DDict* const ddict = (ZSTD_DDict*)workspace;
2441 assert(workspace != NULL);
2442 assert(dict != NULL);
2443 if ((size_t)workspace & 7) return NULL; /* 8-aligned */
2444 if (workspaceSize < neededSpace) return NULL;
2445 if (dictLoadMethod == ZSTD_dlm_byCopy) {
2446 memcpy(ddict+1, dict, dictSize); /* local copy */
2447 dict = ddict+1;
2448 }
2449 if (ZSTD_isError( ZSTD_initDDict_internal(ddict, dict, dictSize, ZSTD_dlm_byRef, dictContentType) ))
2450 return NULL;
2451 return ddict;
2452}
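/* Illustrative sketch (not part of the library) : the workspace handed to ZSTD_initStaticDDict()
 * must be 8-byte aligned, sized with ZSTD_estimateDDictSize(), and must outlive the DDict.
 * `dictBuf`/`dictSize` below are assumed; the workspace could equally be a static or stack buffer :
 *
 *     size_t const wkspSize = ZSTD_estimateDDictSize(dictSize, ZSTD_dlm_byCopy);
 *     void*  const wksp     = malloc(wkspSize);   // needs <stdlib.h>; malloc() output is suitably aligned
 *     const ZSTD_DDict* const ddict = (wksp == NULL) ? NULL :
 *             ZSTD_initStaticDDict(wksp, wkspSize, dictBuf, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
 */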
2453
2454
2455size_t ZSTD_freeDDict(ZSTD_DDict* ddict)
2456{
2457 if (ddict==NULL) return 0; /* support free on NULL */
2458 { ZSTD_customMem const cMem = ddict->cMem;
2459 ZSTD_free(ddict->dictBuffer, cMem);
2460 ZSTD_free(ddict, cMem);
2461 return 0;
2462 }
2463}
2464
2465/*! ZSTD_estimateDDictSize() :
2466 *  Estimate the amount of memory needed to create a dictionary for decompression.
2467 *  Note : dictionaries created by reference (ZSTD_dlm_byRef) are smaller
2468size_t ZSTD_estimateDDictSize(size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod)
2469{
2470 return sizeof(ZSTD_DDict) + (dictLoadMethod == ZSTD_dlm_byRef ? 0 : dictSize);
2471}
2472
2473size_t ZSTD_sizeof_DDict(const ZSTD_DDict* ddict)
2474{
2475 if (ddict==NULL) return 0; /* support sizeof on NULL */
2476 return sizeof(*ddict) + (ddict->dictBuffer ? ddict->dictSize : 0) ;
2477}
2478
2479/*! ZSTD_getDictID_fromDict() :
2480 * Provides the dictID stored within dictionary.
2481 *  if @return == 0, the dictionary is not conformant with the Zstandard specification.
2482 * It can still be loaded, but as a content-only dictionary. */
2483unsigned ZSTD_getDictID_fromDict(const void* dict, size_t dictSize)
2484{
2485 if (dictSize < 8) return 0;
2486 if (MEM_readLE32(dict) != ZSTD_MAGIC_DICTIONARY) return 0;
2487 return MEM_readLE32((const char*)dict + ZSTD_frameIdSize);
2488}
2489
2490/*! ZSTD_getDictID_fromDDict() :
2491 * Provides the dictID of the dictionary loaded into `ddict`.
2492 *  If @return == 0, the dictionary is not conformant to the Zstandard specification, or is empty.
2493 * Non-conformant dictionaries can still be loaded, but as content-only dictionaries. */
2494unsigned ZSTD_getDictID_fromDDict(const ZSTD_DDict* ddict)
2495{
2496 if (ddict==NULL) return 0;
2497 return ZSTD_getDictID_fromDict(ddict->dictContent, ddict->dictSize);
2498}
2499
2500/*! ZSTD_getDictID_fromFrame() :
2501 *  Provides the dictID required to decompress the frame stored within `src`.
2502 *  If @return == 0, the dictID could not be decoded.
2503 *  This could be for one of the following reasons :
2504 *  - The frame does not require a dictionary (most common case).
2505 *  - The frame was built with dictID intentionally removed.
2506 *    The needed dictionary is then hidden information.
2507 * Note : this use case also happens when using a non-conformant dictionary.
2508 * - `srcSize` is too small, and as a result, frame header could not be decoded.
2509 * Note : possible if `srcSize < ZSTD_FRAMEHEADERSIZE_MAX`.
2510 * - This is not a Zstandard frame.
2511 *  To identify the exact failure cause, it's possible to use
2512 * ZSTD_getFrameHeader(), which will provide a more precise error code. */
2513unsigned ZSTD_getDictID_fromFrame(const void* src, size_t srcSize)
2514{
2515 ZSTD_frameHeader zfp = { 0, 0, 0, ZSTD_frame, 0, 0, 0 };
2516 size_t const hError = ZSTD_getFrameHeader(&zfp, src, srcSize);
2517 if (ZSTD_isError(hError)) return 0;
2518 return zfp.dictID;
2519}
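/* Illustrative sketch (not part of the library) : combining the dictID helpers to check whether
 * a candidate dictionary is the one a frame was compressed with. Buffer names are assumed :
 *
 *     unsigned const frameID = ZSTD_getDictID_fromFrame(cBuf, cSize);
 *     unsigned const dictID  = ZSTD_getDictID_fromDict(dictBuf, dictSize);
 *     int const mayMatch = (frameID == 0) || (frameID == dictID);   // frameID==0 : no way to tell
 */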
2520
2521
2522/*! ZSTD_decompress_usingDDict() :
2523* Decompression using a pre-digested Dictionary
2524* Use dictionary without significant overhead. */
2525size_t ZSTD_decompress_usingDDict(ZSTD_DCtx* dctx,
2526 void* dst, size_t dstCapacity,
2527 const void* src, size_t srcSize,
2528 const ZSTD_DDict* ddict)
2529{
2530 /* pass content and size in case legacy frames are encountered */
2531 return ZSTD_decompressMultiFrame(dctx, dst, dstCapacity, src, srcSize,
2532 NULL, 0,
2533 ddict);
2534}
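/* Illustrative sketch (not part of the library) : a DDict is typically created once and re-used
 * across many frames compressed with the same dictionary. Buffer names are assumed :
 *
 *     ZSTD_DDict* const ddict = ZSTD_createDDict(dictBuf, dictSize);
 *     ZSTD_DCtx*  const dctx  = ZSTD_createDCtx();
 *     size_t const rSize = ZSTD_decompress_usingDDict(dctx, rBuf, rCapacity, cBuf, cSize, ddict);
 *     // ... check ZSTD_isError(rSize), then decompress further frames with the same ddict ...
 *     ZSTD_freeDCtx(dctx);
 *     ZSTD_freeDDict(ddict);
 */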
2535
2536
2537/*=====================================
2538* Streaming decompression
2539*====================================*/
2540
2541ZSTD_DStream* ZSTD_createDStream(void)
2542{
2543 DEBUGLOG(3, "ZSTD_createDStream");
2544 return ZSTD_createDStream_advanced(ZSTD_defaultCMem);
2545}
2546
2547ZSTD_DStream* ZSTD_initStaticDStream(void *workspace, size_t workspaceSize)
2548{
2549 return ZSTD_initStaticDCtx(workspace, workspaceSize);
2550}
2551
2552ZSTD_DStream* ZSTD_createDStream_advanced(ZSTD_customMem customMem)
2553{
2554 return ZSTD_createDCtx_advanced(customMem);
2555}
2556
2557size_t ZSTD_freeDStream(ZSTD_DStream* zds)
2558{
2559 return ZSTD_freeDCtx(zds);
2560}
2561
2562
2563/* *** Initialization *** */
2564
2565size_t ZSTD_DStreamInSize(void) { return ZSTD_BLOCKSIZE_MAX + ZSTD_blockHeaderSize; }
2566size_t ZSTD_DStreamOutSize(void) { return ZSTD_BLOCKSIZE_MAX; }
2567
2568size_t ZSTD_DCtx_loadDictionary_advanced(ZSTD_DCtx* dctx, const void* dict, size_t dictSize, ZSTD_dictLoadMethod_e dictLoadMethod, ZSTD_dictContentType_e dictContentType)
2569{
2570 if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2571 ZSTD_freeDDict(dctx->ddictLocal);
2572 if (dict && dictSize >= 8) {
2573 dctx->ddictLocal = ZSTD_createDDict_advanced(dict, dictSize, dictLoadMethod, dictContentType, dctx->customMem);
2574 if (dctx->ddictLocal == NULL) return ERROR(memory_allocation);
2575 } else {
2576 dctx->ddictLocal = NULL;
2577 }
2578 dctx->ddict = dctx->ddictLocal;
2579 return 0;
2580}
2581
2582size_t ZSTD_DCtx_loadDictionary_byReference(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
2583{
2584 return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byRef, ZSTD_dct_auto);
2585}
2586
2587size_t ZSTD_DCtx_loadDictionary(ZSTD_DCtx* dctx, const void* dict, size_t dictSize)
2588{
2589 return ZSTD_DCtx_loadDictionary_advanced(dctx, dict, dictSize, ZSTD_dlm_byCopy, ZSTD_dct_auto);
2590}
2591
2592size_t ZSTD_DCtx_refPrefix_advanced(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize, ZSTD_dictContentType_e dictContentType)
2593{
2594 return ZSTD_DCtx_loadDictionary_advanced(dctx, prefix, prefixSize, ZSTD_dlm_byRef, dictContentType);
2595}
2596
2597size_t ZSTD_DCtx_refPrefix(ZSTD_DCtx* dctx, const void* prefix, size_t prefixSize)
2598{
2599 return ZSTD_DCtx_refPrefix_advanced(dctx, prefix, prefixSize, ZSTD_dct_rawContent);
2600}
2601
2602
2603/* ZSTD_initDStream_usingDict() :
2604 * return : expected size, aka ZSTD_frameHeaderSize_prefix.
2605 * this function cannot fail */
2606size_t ZSTD_initDStream_usingDict(ZSTD_DStream* zds, const void* dict, size_t dictSize)
2607{
2608 DEBUGLOG(4, "ZSTD_initDStream_usingDict");
2609 zds->streamStage = zdss_init;
2610 CHECK_F( ZSTD_DCtx_loadDictionary(zds, dict, dictSize) );
2611 return ZSTD_frameHeaderSize_prefix;
2612}
2613
2614/* note : this variant can't fail */
2615size_t ZSTD_initDStream(ZSTD_DStream* zds)
2616{
2617 DEBUGLOG(4, "ZSTD_initDStream");
2618 return ZSTD_initDStream_usingDict(zds, NULL, 0);
2619}
2620
2621size_t ZSTD_DCtx_refDDict(ZSTD_DCtx* dctx, const ZSTD_DDict* ddict)
2622{
2623 if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2624 dctx->ddict = ddict;
2625 return 0;
2626}
2627
2628/* ZSTD_initDStream_usingDDict() :
2629 * ddict is just referenced, and must outlive the decompression session
2630 * this function cannot fail */
2631size_t ZSTD_initDStream_usingDDict(ZSTD_DStream* dctx, const ZSTD_DDict* ddict)
2632{
2633 size_t const initResult = ZSTD_initDStream(dctx);
2634 dctx->ddict = ddict;
2635 return initResult;
2636}
2637
2638/* ZSTD_resetDStream() :
2639 * return : expected size, aka ZSTD_frameHeaderSize_prefix.
2640 * this function cannot fail */
2641size_t ZSTD_resetDStream(ZSTD_DStream* dctx)
2642{
2643 DEBUGLOG(4, "ZSTD_resetDStream");
2644 dctx->streamStage = zdss_loadHeader;
2645 dctx->lhSize = dctx->inPos = dctx->outStart = dctx->outEnd = 0;
2646 dctx->legacyVersion = 0;
2647 dctx->hostageByte = 0;
2648 return ZSTD_frameHeaderSize_prefix;
2649}
2650
2651size_t ZSTD_setDStreamParameter(ZSTD_DStream* dctx,
2652 ZSTD_DStreamParameter_e paramType, unsigned paramValue)
2653{
2654 if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2655 switch(paramType)
2656 {
2657 default : return ERROR(parameter_unsupported);
2658 case DStream_p_maxWindowSize :
2659 DEBUGLOG(4, "setting maxWindowSize = %u KB", paramValue >> 10);
2660 dctx->maxWindowSize = paramValue ? paramValue : (U32)(-1);
2661 break;
2662 }
2663 return 0;
2664}
2665
2666size_t ZSTD_DCtx_setMaxWindowSize(ZSTD_DCtx* dctx, size_t maxWindowSize)
2667{
2668 if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2669 dctx->maxWindowSize = maxWindowSize;
2670 return 0;
2671}
2672
2673size_t ZSTD_DCtx_setFormat(ZSTD_DCtx* dctx, ZSTD_format_e format)
2674{
2675 DEBUGLOG(4, "ZSTD_DCtx_setFormat : %u", (unsigned)format);
2676 if (dctx->streamStage != zdss_init) return ERROR(stage_wrong);
2677 dctx->format = format;
2678 return 0;
2679}
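/* Illustrative sketch (not part of the library) : advanced parameters must be set while the
 * context is still in the zdss_init stage, i.e. before any call that starts consuming a frame :
 *
 *     ZSTD_DCtx_reset(dctx);
 *     size_t const e1 = ZSTD_DCtx_setMaxWindowSize(dctx, (size_t)1 << 27);   // 128 MB limit (example value)
 *     size_t const e2 = ZSTD_DCtx_setFormat(dctx, ZSTD_f_zstd1);             // or ZSTD_f_zstd1_magicless
 */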
2680
2681
2682size_t ZSTD_sizeof_DStream(const ZSTD_DStream* dctx)
2683{
2684 return ZSTD_sizeof_DCtx(dctx);
2685}
2686
2687size_t ZSTD_decodingBufferSize_min(unsigned long long windowSize, unsigned long long frameContentSize)
2688{
2689 size_t const blockSize = (size_t) MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
2690 unsigned long long const neededRBSize = windowSize + blockSize + (WILDCOPY_OVERLENGTH * 2);
2691 unsigned long long const neededSize = MIN(frameContentSize, neededRBSize);
2692 size_t const minRBSize = (size_t) neededSize;
2693 if ((unsigned long long)minRBSize != neededSize) return ERROR(frameParameter_windowTooLarge);
2694 return minRBSize;
2695}
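/* Example (illustrative) : for a frame with windowSize = 1 MB and unknown frameContentSize,
 * the minimum decoding buffer is windowSize + one full block (ZSTD_BLOCKSIZE_MAX = 128 KB)
 * + 2*WILDCOPY_OVERLENGTH bytes of overwrite margin. A known, smaller frameContentSize caps it. */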
2696
2697size_t ZSTD_estimateDStreamSize(size_t windowSize)
2698{
2699 size_t const blockSize = MIN(windowSize, ZSTD_BLOCKSIZE_MAX);
2700 size_t const inBuffSize = blockSize; /* no block can be larger */
2701 size_t const outBuffSize = ZSTD_decodingBufferSize_min(windowSize, ZSTD_CONTENTSIZE_UNKNOWN);
2702 return ZSTD_estimateDCtxSize() + inBuffSize + outBuffSize;
2703}
2704
2705size_t ZSTD_estimateDStreamSize_fromFrame(const void* src, size_t srcSize)
2706{
2707 U32 const windowSizeMax = 1U << ZSTD_WINDOWLOG_MAX; /* note : should be user-selectable */
2708 ZSTD_frameHeader zfh;
2709 size_t const err = ZSTD_getFrameHeader(&zfh, src, srcSize);
2710 if (ZSTD_isError(err)) return err;
2711 if (err>0) return ERROR(srcSize_wrong);
2712 if (zfh.windowSize > windowSizeMax)
2713 return ERROR(frameParameter_windowTooLarge);
2714 return ZSTD_estimateDStreamSize((size_t)zfh.windowSize);
2715}
2716
2717
2718/* ***** Decompression ***** */
2719
2720MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
2721{
2722 size_t const length = MIN(dstCapacity, srcSize);
2723 memcpy(dst, src, length);
2724 return length;
2725}
2726
2727
2728size_t ZSTD_decompressStream(ZSTD_DStream* zds, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
2729{
2730 const char* const istart = (const char*)(input->src) + input->pos;
2731 const char* const iend = (const char*)(input->src) + input->size;
2732 const char* ip = istart;
2733 char* const ostart = (char*)(output->dst) + output->pos;
2734 char* const oend = (char*)(output->dst) + output->size;
2735 char* op = ostart;
2736 U32 someMoreWork = 1;
2737
2738 DEBUGLOG(5, "ZSTD_decompressStream");
2739 if (input->pos > input->size) { /* forbidden */
2740 DEBUGLOG(5, "in: pos: %u vs size: %u",
2741 (U32)input->pos, (U32)input->size);
2742 return ERROR(srcSize_wrong);
2743 }
2744 if (output->pos > output->size) { /* forbidden */
2745 DEBUGLOG(5, "out: pos: %u vs size: %u",
2746 (U32)output->pos, (U32)output->size);
2747 return ERROR(dstSize_tooSmall);
2748 }
2749 DEBUGLOG(5, "input size : %u", (U32)(input->size - input->pos));
2750
2751 while (someMoreWork) {
2752 switch(zds->streamStage)
2753 {
2754 case zdss_init :
2755 DEBUGLOG(5, "stage zdss_init => transparent reset ");
2756 ZSTD_resetDStream(zds); /* transparent reset on starting decoding a new frame */
2757 /* fall-through */
2758
2759 case zdss_loadHeader :
2760 DEBUGLOG(5, "stage zdss_loadHeader (srcSize : %u)", (U32)(iend - ip));
2761#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
2762 if (zds->legacyVersion) {
2763 /* legacy support is incompatible with static dctx */
2764 if (zds->staticSize) return ERROR(memory_allocation);
2765 { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, zds->legacyVersion, output, input);
2766 if (hint==0) zds->streamStage = zdss_init;
2767 return hint;
2768 } }
2769#endif
2770 { size_t const hSize = ZSTD_getFrameHeader_internal(&zds->fParams, zds->headerBuffer, zds->lhSize, zds->format);
2771 DEBUGLOG(5, "header size : %u", (U32)hSize);
2772 if (ZSTD_isError(hSize)) {
2773#if defined(ZSTD_LEGACY_SUPPORT) && (ZSTD_LEGACY_SUPPORT>=1)
2774 U32 const legacyVersion = ZSTD_isLegacy(istart, iend-istart);
2775 if (legacyVersion) {
2776 const void* const dict = zds->ddict ? zds->ddict->dictContent : NULL;
2777 size_t const dictSize = zds->ddict ? zds->ddict->dictSize : 0;
2778 DEBUGLOG(5, "ZSTD_decompressStream: detected legacy version v0.%u", legacyVersion);
2779 /* legacy support is incompatible with static dctx */
2780 if (zds->staticSize) return ERROR(memory_allocation);
2781 CHECK_F(ZSTD_initLegacyStream(&zds->legacyContext,
2782 zds->previousLegacyVersion, legacyVersion,
2783 dict, dictSize));
2784 zds->legacyVersion = zds->previousLegacyVersion = legacyVersion;
2785 { size_t const hint = ZSTD_decompressLegacyStream(zds->legacyContext, legacyVersion, output, input);
2786 if (hint==0) zds->streamStage = zdss_init; /* or stay in stage zdss_loadHeader */
2787 return hint;
2788 } }
2789#endif
2790 return hSize; /* error */
2791 }
2792 if (hSize != 0) { /* need more input */
2793 size_t const toLoad = hSize - zds->lhSize; /* if hSize!=0, hSize > zds->lhSize */
2794 size_t const remainingInput = (size_t)(iend-ip);
2795 assert(iend >= ip);
2796 if (toLoad > remainingInput) { /* not enough input to load full header */
2797 if (remainingInput > 0) {
2798 memcpy(zds->headerBuffer + zds->lhSize, ip, remainingInput);
2799 zds->lhSize += remainingInput;
2800 }
2801 input->pos = input->size;
2802 return (MAX(ZSTD_frameHeaderSize_min, hSize) - zds->lhSize) + ZSTD_blockHeaderSize; /* remaining header bytes + next block header */
2803 }
2804 assert(ip != NULL);
2805 memcpy(zds->headerBuffer + zds->lhSize, ip, toLoad); zds->lhSize = hSize; ip += toLoad;
2806 break;
2807 } }
2808
2809 /* check for single-pass mode opportunity */
2810 if (zds->fParams.frameContentSize && zds->fParams.windowSize /* skippable frame if == 0 */
2811 && (U64)(size_t)(oend-op) >= zds->fParams.frameContentSize) {
2812 size_t const cSize = ZSTD_findFrameCompressedSize(istart, iend-istart);
2813 if (cSize <= (size_t)(iend-istart)) {
2814 /* shortcut : using single-pass mode */
2815 size_t const decompressedSize = ZSTD_decompress_usingDDict(zds, op, oend-op, istart, cSize, zds->ddict);
2816 if (ZSTD_isError(decompressedSize)) return decompressedSize;
2817 DEBUGLOG(4, "shortcut to single-pass ZSTD_decompress_usingDDict()")
2818 ip = istart + cSize;
2819 op += decompressedSize;
2820 zds->expected = 0;
2821 zds->streamStage = zdss_init;
2822 someMoreWork = 0;
2823 break;
2824 } }
2825
2826 /* Consume header (see ZSTDds_decodeFrameHeader) */
2827 DEBUGLOG(4, "Consume header");
2828 CHECK_F(ZSTD_decompressBegin_usingDDict(zds, zds->ddict));
2829
2830 if ((MEM_readLE32(zds->headerBuffer) & 0xFFFFFFF0U) == ZSTD_MAGIC_SKIPPABLE_START) { /* skippable frame */
2831 zds->expected = MEM_readLE32(zds->headerBuffer + ZSTD_frameIdSize);
2832 zds->stage = ZSTDds_skipFrame;
2833 } else {
2834 CHECK_F(ZSTD_decodeFrameHeader(zds, zds->headerBuffer, zds->lhSize));
2835 zds->expected = ZSTD_blockHeaderSize;
2836 zds->stage = ZSTDds_decodeBlockHeader;
2837 }
2838
2839 /* control buffer memory usage */
2840 DEBUGLOG(4, "Control max memory usage (%u KB <= max %u KB)",
2841 (U32)(zds->fParams.windowSize >>10),
2842 (U32)(zds->maxWindowSize >> 10) );
2843 zds->fParams.windowSize = MAX(zds->fParams.windowSize, 1U << ZSTD_WINDOWLOG_ABSOLUTEMIN);
2844 if (zds->fParams.windowSize > zds->maxWindowSize) return ERROR(frameParameter_windowTooLarge);
2845
2846 /* Adapt buffer sizes to frame header instructions */
2847 { size_t const neededInBuffSize = MAX(zds->fParams.blockSizeMax, 4 /* frame checksum */);
2848 size_t const neededOutBuffSize = ZSTD_decodingBufferSize_min(zds->fParams.windowSize, zds->fParams.frameContentSize);
2849 if ((zds->inBuffSize < neededInBuffSize) || (zds->outBuffSize < neededOutBuffSize)) {
2850 size_t const bufferSize = neededInBuffSize + neededOutBuffSize;
2851 DEBUGLOG(4, "inBuff : from %u to %u",
2852 (U32)zds->inBuffSize, (U32)neededInBuffSize);
2853 DEBUGLOG(4, "outBuff : from %u to %u",
2854 (U32)zds->outBuffSize, (U32)neededOutBuffSize);
2855 if (zds->staticSize) { /* static DCtx */
2856 DEBUGLOG(4, "staticSize : %u", (U32)zds->staticSize);
2857 assert(zds->staticSize >= sizeof(ZSTD_DCtx)); /* controlled at init */
2858 if (bufferSize > zds->staticSize - sizeof(ZSTD_DCtx))
2859 return ERROR(memory_allocation);
2860 } else {
2861 ZSTD_free(zds->inBuff, zds->customMem);
2862 zds->inBuffSize = 0;
2863 zds->outBuffSize = 0;
2864 zds->inBuff = (char*)ZSTD_malloc(bufferSize, zds->customMem);
2865 if (zds->inBuff == NULL) return ERROR(memory_allocation);
2866 }
2867 zds->inBuffSize = neededInBuffSize;
2868 zds->outBuff = zds->inBuff + zds->inBuffSize;
2869 zds->outBuffSize = neededOutBuffSize;
2870 } }
2871 zds->streamStage = zdss_read;
2872 /* fall-through */
2873
2874 case zdss_read:
2875 DEBUGLOG(5, "stage zdss_read");
2876 { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
2877 DEBUGLOG(5, "neededInSize = %u", (U32)neededInSize);
2878 if (neededInSize==0) { /* end of frame */
2879 zds->streamStage = zdss_init;
2880 someMoreWork = 0;
2881 break;
2882 }
2883 if ((size_t)(iend-ip) >= neededInSize) { /* decode directly from src */
2884 int const isSkipFrame = ZSTD_isSkipFrame(zds);
2885 size_t const decodedSize = ZSTD_decompressContinue(zds,
2886 zds->outBuff + zds->outStart, (isSkipFrame ? 0 : zds->outBuffSize - zds->outStart),
2887 ip, neededInSize);
2888 if (ZSTD_isError(decodedSize)) return decodedSize;
2889 ip += neededInSize;
2890 if (!decodedSize && !isSkipFrame) break; /* this was just a header */
2891 zds->outEnd = zds->outStart + decodedSize;
2892 zds->streamStage = zdss_flush;
2893 break;
2894 } }
2895 if (ip==iend) { someMoreWork = 0; break; } /* no more input */
2896 zds->streamStage = zdss_load;
2897 /* fall-through */
2898
2899 case zdss_load:
2900 { size_t const neededInSize = ZSTD_nextSrcSizeToDecompress(zds);
2901 size_t const toLoad = neededInSize - zds->inPos;
2902 int const isSkipFrame = ZSTD_isSkipFrame(zds);
2903 size_t loadedSize;
2904 if (isSkipFrame) {
2905 loadedSize = MIN(toLoad, (size_t)(iend-ip));
2906 } else {
2907 if (toLoad > zds->inBuffSize - zds->inPos) return ERROR(corruption_detected); /* should never happen */
2908 loadedSize = ZSTD_limitCopy(zds->inBuff + zds->inPos, toLoad, ip, iend-ip);
2909 }
2910 ip += loadedSize;
2911 zds->inPos += loadedSize;
2912 if (loadedSize < toLoad) { someMoreWork = 0; break; } /* not enough input, wait for more */
2913
2914 /* decode loaded input */
2915 { size_t const decodedSize = ZSTD_decompressContinue(zds,
2916 zds->outBuff + zds->outStart, zds->outBuffSize - zds->outStart,
2917 zds->inBuff, neededInSize);
2918 if (ZSTD_isError(decodedSize)) return decodedSize;
2919 zds->inPos = 0; /* input is consumed */
2920 if (!decodedSize && !isSkipFrame) { zds->streamStage = zdss_read; break; } /* this was just a header */
2921 zds->outEnd = zds->outStart + decodedSize;
2922 } }
2923 zds->streamStage = zdss_flush;
2924 /* fall-through */
2925
2926 case zdss_flush:
2927 { size_t const toFlushSize = zds->outEnd - zds->outStart;
2928 size_t const flushedSize = ZSTD_limitCopy(op, oend-op, zds->outBuff + zds->outStart, toFlushSize);
2929 op += flushedSize;
2930 zds->outStart += flushedSize;
2931 if (flushedSize == toFlushSize) { /* flush completed */
2932 zds->streamStage = zdss_read;
2933 if ( (zds->outBuffSize < zds->fParams.frameContentSize)
2934 && (zds->outStart + zds->fParams.blockSizeMax > zds->outBuffSize) ) {
2935 DEBUGLOG(5, "restart filling outBuff from beginning (left:%i, needed:%u)",
2936 (int)(zds->outBuffSize - zds->outStart),
2937 (U32)zds->fParams.blockSizeMax);
2938 zds->outStart = zds->outEnd = 0;
2939 }
2940 break;
2941 } }
2942 /* cannot complete flush */
2943 someMoreWork = 0;
2944 break;
2945
2946 default: return ERROR(GENERIC); /* impossible */
2947 } }
2948
2949 /* result */
2950 input->pos += (size_t)(ip-istart);
2951 output->pos += (size_t)(op-ostart);
2952 { size_t nextSrcSizeHint = ZSTD_nextSrcSizeToDecompress(zds);
2953 if (!nextSrcSizeHint) { /* frame fully decoded */
2954 if (zds->outEnd == zds->outStart) { /* output fully flushed */
2955 if (zds->hostageByte) {
2956 if (input->pos >= input->size) {
2957 /* can't release hostage (not present) */
2958 zds->streamStage = zdss_read;
2959 return 1;
2960 }
2961 input->pos++; /* release hostage */
2962 } /* zds->hostageByte */
2963 return 0;
2964 } /* zds->outEnd == zds->outStart */
2965 if (!zds->hostageByte) { /* output not fully flushed; keep last byte as hostage; will be released when all output is flushed */
2966 input->pos--; /* note : pos > 0, otherwise, impossible to finish reading last block */
2967 zds->hostageByte=1;
2968 }
2969 return 1;
2970 } /* nextSrcSizeHint==0 */
2971 nextSrcSizeHint += ZSTD_blockHeaderSize * (ZSTD_nextInputType(zds) == ZSTDnit_block); /* preload header of next block */
2972 assert(zds->inPos <= nextSrcSizeHint);
2973        nextSrcSizeHint -= zds->inPos;   /* part already loaded */
2974 return nextSrcSizeHint;
2975 }
2976}
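/* Illustrative sketch (not part of the library) : typical buffered streaming loop, assuming the
 * whole input is available in `cBuf`/`cSize` and `rBuf`/`rCapacity` can hold all regenerated data
 * (a real application would refill the input buffer and flush the output buffer instead) :
 *
 *     ZSTD_DStream* const zds = ZSTD_createDStream();
 *     ZSTD_inBuffer  input  = { cBuf, cSize, 0 };
 *     ZSTD_outBuffer output = { rBuf, rCapacity, 0 };
 *     size_t ret = ZSTD_initDStream(zds);
 *     while (!ZSTD_isError(ret) && (input.pos < input.size)) {
 *         ret = ZSTD_decompressStream(zds, &output, &input);   // ret==0 : a frame is fully decoded and flushed
 *     }
 *     ZSTD_freeDStream(zds);
 */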
2977
2978
2979size_t ZSTD_decompress_generic(ZSTD_DCtx* dctx, ZSTD_outBuffer* output, ZSTD_inBuffer* input)
2980{
2981 return ZSTD_decompressStream(dctx, output, input);
2982}
2983
2984size_t ZSTD_decompress_generic_simpleArgs (
2985 ZSTD_DCtx* dctx,
2986 void* dst, size_t dstCapacity, size_t* dstPos,
2987 const void* src, size_t srcSize, size_t* srcPos)
2988{
2989 ZSTD_outBuffer output = { dst, dstCapacity, *dstPos };
2990 ZSTD_inBuffer input = { src, srcSize, *srcPos };
2991    /* ZSTD_decompress_generic() will check validity of dstPos and srcPos */
2992 size_t const cErr = ZSTD_decompress_generic(dctx, &output, &input);
2993 *dstPos = output.pos;
2994 *srcPos = input.pos;
2995 return cErr;
2996}
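/* Illustrative sketch (not part of the library) : the same streaming loop through the newer
 * "generic" entry point, with a dictionary loaded via the DCtx parameter API. Buffer names assumed :
 *
 *     ZSTD_DCtx* const dctx = ZSTD_createDCtx();
 *     ZSTD_inBuffer  in  = { cBuf, cSize, 0 };
 *     ZSTD_outBuffer out = { rBuf, rCapacity, 0 };
 *     size_t err = ZSTD_DCtx_loadDictionary(dctx, dictBuf, dictSize);
 *     while (!ZSTD_isError(err) && (in.pos < in.size))
 *         err = ZSTD_decompress_generic(dctx, &out, &in);
 *     ZSTD_freeDCtx(dctx);
 */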
2997
2998void ZSTD_DCtx_reset(ZSTD_DCtx* dctx)
2999{
3000 (void)ZSTD_initDStream(dctx);
3001 dctx->format = ZSTD_f_zstd1;
3002 dctx->maxWindowSize = ZSTD_MAXWINDOWSIZE_DEFAULT;
3003}
3004