1// Copyright 2005 Google Inc. All Rights Reserved.
2//
3// Redistribution and use in source and binary forms, with or without
4// modification, are permitted provided that the following conditions are
5// met:
6//
7// * Redistributions of source code must retain the above copyright
8// notice, this list of conditions and the following disclaimer.
9// * Redistributions in binary form must reproduce the above
10// copyright notice, this list of conditions and the following disclaimer
11// in the documentation and/or other materials provided with the
12// distribution.
13// * Neither the name of Google Inc. nor the names of its
14// contributors may be used to endorse or promote products derived from
15// this software without specific prior written permission.
16//
17// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28
29#include "snappy.h"
30#include "snappy-internal.h"
31#include "snappy-sinksource.h"
32
33#if !defined(SNAPPY_HAVE_SSSE3)
34// __SSSE3__ is defined by GCC and Clang. Visual Studio doesn't target SIMD
35// support between SSE2 and AVX (so SSSE3 instructions require AVX support), and
36// defines __AVX__ when AVX support is available.
37#if defined(__SSSE3__) || defined(__AVX__)
38#define SNAPPY_HAVE_SSSE3 1
39#else
40#define SNAPPY_HAVE_SSSE3 0
41#endif
42#endif // !defined(SNAPPY_HAVE_SSSE3)
43
44#if !defined(SNAPPY_HAVE_BMI2)
45// __BMI2__ is defined by GCC and Clang. Visual Studio doesn't target BMI2
46// specifically, but it does define __AVX2__ when AVX2 support is available.
47// Fortunately, AVX2 was introduced in Haswell, just like BMI2.
48//
49// BMI2 is not defined as a subset of AVX2 (unlike SSSE3 and AVX above). So,
50// GCC and Clang can build code with AVX2 enabled but BMI2 disabled, in which
51// case issuing BMI2 instructions results in a compiler error.
52#if defined(__BMI2__) || (defined(_MSC_VER) && defined(__AVX2__))
53#define SNAPPY_HAVE_BMI2 1
54#else
55#define SNAPPY_HAVE_BMI2 0
56#endif
57#endif // !defined(SNAPPY_HAVE_BMI2)
58
59#if SNAPPY_HAVE_SSSE3
// Please do not replace with <x86intrin.h> or with headers that assume more
// advanced SSE versions without checking with all the OWNERS.
62#include <tmmintrin.h>
63#endif
64
65#if SNAPPY_HAVE_BMI2
// Please do not replace with <x86intrin.h> or with headers that assume more
// advanced SSE versions without checking with all the OWNERS.
68#include <immintrin.h>
69#endif
70
71#include <stdio.h>
72
73#include <algorithm>
74#include <string>
75#include <vector>
76
77namespace snappy {
78
79using internal::COPY_1_BYTE_OFFSET;
80using internal::COPY_2_BYTE_OFFSET;
81using internal::LITERAL;
82using internal::char_table;
83using internal::kMaximumTagLength;
84
85// Any hash function will produce a valid compressed bitstream, but a good
86// hash function reduces the number of collisions and thus yields better
87// compression for compressible input, and more speed for incompressible
88// input. Of course, it doesn't hurt if the hash function is reasonably fast
89// either, as it gets called a lot.
90static inline uint32 HashBytes(uint32 bytes, int shift) {
91 uint32 kMul = 0x1e35a7bd;
92 return (bytes * kMul) >> shift;
93}
94static inline uint32 Hash(const char* p, int shift) {
95 return HashBytes(UNALIGNED_LOAD32(p), shift);
96}
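
// Illustrative example (not from the original source): the compressor uses a
// power-of-two hash table and derives shift = 32 - Log2Floor(table_size) (see
// CompressFragment below), so a 16384-entry table gives shift == 18 and Hash()
// returns (UNALIGNED_LOAD32(p) * 0x1e35a7bd) >> 18, a 14-bit value that always
// lands in [0, table_size).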
97
98size_t MaxCompressedLength(size_t source_len) {
99 // Compressed data can be defined as:
100 // compressed := item* literal*
101 // item := literal* copy
102 //
103 // The trailing literal sequence has a space blowup of at most 62/60
104 // since a literal of length 60 needs one tag byte + one extra byte
105 // for length information.
106 //
107 // Item blowup is trickier to measure. Suppose the "copy" op copies
108 // 4 bytes of data. Because of a special check in the encoding code,
109 // we produce a 4-byte copy only if the offset is < 65536. Therefore
110 // the copy op takes 3 bytes to encode, and this type of item leads
111 // to at most the 62/60 blowup for representing literals.
112 //
113 // Suppose the "copy" op copies 5 bytes of data. If the offset is big
114 // enough, it will take 5 bytes to encode the copy op. Therefore the
115 // worst case here is a one-byte literal followed by a five-byte copy.
116 // I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
117 //
118 // This last factor dominates the blowup, so the final estimate is:
119 return 32 + source_len + source_len/6;
120}
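
// Worked example (illustrative): a 1 MiB input (1048576 bytes) yields a bound
// of 32 + 1048576 + 1048576/6 = 32 + 1048576 + 174762 = 1223370 bytes, i.e.
// roughly a 16.7% worst-case expansion plus a small constant.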
121
122namespace {
123
124void UnalignedCopy64(const void* src, void* dst) {
125 char tmp[8];
126 memcpy(tmp, src, 8);
127 memcpy(dst, tmp, 8);
128}
129
130void UnalignedCopy128(const void* src, void* dst) {
131 // memcpy gets vectorized when the appropriate compiler options are used.
132 // For example, x86 compilers targeting SSE2+ will optimize to an SSE2 load
133 // and store.
134 char tmp[16];
135 memcpy(tmp, src, 16);
136 memcpy(dst, tmp, 16);
137}
138
// Copy [src, src+(op_limit-op)) to [op, op_limit) a byte at a time. Used
140// for handling COPY operations where the input and output regions may overlap.
141// For example, suppose:
142// src == "ab"
143// op == src + 2
144// op_limit == op + 20
145// After IncrementalCopySlow(src, op, op_limit), the result will have eleven
146// copies of "ab"
147// ababababababababababab
148// Note that this does not match the semantics of either memcpy() or memmove().
149inline char* IncrementalCopySlow(const char* src, char* op,
150 char* const op_limit) {
151 // TODO: Remove pragma when LLVM is aware this
152 // function is only called in cold regions and when cold regions don't get
153 // vectorized or unrolled.
154#ifdef __clang__
155#pragma clang loop unroll(disable)
156#endif
157 while (op < op_limit) {
158 *op++ = *src++;
159 }
160 return op_limit;
161}
162
163#if SNAPPY_HAVE_SSSE3
164
165// This is a table of shuffle control masks that can be used as the source
166// operand for PSHUFB to permute the contents of the destination XMM register
167// into a repeating byte pattern.
168alignas(16) const char pshufb_fill_patterns[7][16] = {
169 {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
170 {0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1},
171 {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2, 0},
172 {0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3},
173 {0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0},
174 {0, 1, 2, 3, 4, 5, 0, 1, 2, 3, 4, 5, 0, 1, 2, 3},
175 {0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6, 0, 1},
176};
177
178#endif // SNAPPY_HAVE_SSSE3
179
// Copy [src, src+(op_limit-op)) to [op, op_limit) but faster than
181// IncrementalCopySlow. buf_limit is the address past the end of the writable
182// region of the buffer.
183inline char* IncrementalCopy(const char* src, char* op, char* const op_limit,
184 char* const buf_limit) {
185 // Terminology:
186 //
187 // slop = buf_limit - op
188 // pat = op - src
  //   len  = op_limit - op
190 assert(src < op);
191 assert(op <= op_limit);
192 assert(op_limit <= buf_limit);
  // NOTE: The compressor always emits 4 <= len <= 64. It is okay to assume
  // that in order to optimize this function, but we must also handle the other
  // cases in case the input does not satisfy these conditions.
196
197 size_t pattern_size = op - src;
198 // The cases are split into different branches to allow the branch predictor,
199 // FDO, and static prediction hints to work better. For each input we list the
200 // ratio of invocations that match each condition.
201 //
202 // input slop < 16 pat < 8 len > 16
203 // ------------------------------------------
204 // html|html4|cp 0% 1.01% 27.73%
205 // urls 0% 0.88% 14.79%
206 // jpg 0% 64.29% 7.14%
207 // pdf 0% 2.56% 58.06%
208 // txt[1-4] 0% 0.23% 0.97%
209 // pb 0% 0.96% 13.88%
210 // bin 0.01% 22.27% 41.17%
211 //
212 // It is very rare that we don't have enough slop for doing block copies. It
213 // is also rare that we need to expand a pattern. Small patterns are common
214 // for incompressible formats and for those we are plenty fast already.
215 // Lengths are normally not greater than 16 but they vary depending on the
216 // input. In general if we always predict len <= 16 it would be an ok
217 // prediction.
218 //
219 // In order to be fast we want a pattern >= 8 bytes and an unrolled loop
220 // copying 2x 8 bytes at a time.
221
222 // Handle the uncommon case where pattern is less than 8 bytes.
223 if (SNAPPY_PREDICT_FALSE(pattern_size < 8)) {
224#if SNAPPY_HAVE_SSSE3
    // Load the first eight bytes into a 128-bit XMM register, then use PSHUFB
    // to permute the register's contents in-place into a repeating sequence of
    // the first "pattern_size" bytes.
    // For example, suppose:
    //    src       == "abc"
    //    op        == src + 3
    // After _mm_shuffle_epi8(), "pattern" will have five copies of "abc"
    // followed by one byte of slop: abcabcabcabcabca.
233 //
234 // The non-SSE fallback implementation suffers from store-forwarding stalls
235 // because its loads and stores partly overlap. By expanding the pattern
236 // in-place, we avoid the penalty.
237 if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 16)) {
238 const __m128i shuffle_mask = _mm_load_si128(
239 reinterpret_cast<const __m128i*>(pshufb_fill_patterns)
240 + pattern_size - 1);
241 const __m128i pattern = _mm_shuffle_epi8(
242 _mm_loadl_epi64(reinterpret_cast<const __m128i*>(src)), shuffle_mask);
243 // Uninitialized bytes are masked out by the shuffle mask.
244 // TODO: remove annotation and macro defs once MSan is fixed.
245 SNAPPY_ANNOTATE_MEMORY_IS_INITIALIZED(&pattern, sizeof(pattern));
246 pattern_size *= 16 / pattern_size;
247 char* op_end = std::min(op_limit, buf_limit - 15);
248 while (op < op_end) {
249 _mm_storeu_si128(reinterpret_cast<__m128i*>(op), pattern);
250 op += pattern_size;
251 }
252 if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
253 }
254 return IncrementalCopySlow(src, op, op_limit);
255#else // !SNAPPY_HAVE_SSSE3
256 // If plenty of buffer space remains, expand the pattern to at least 8
257 // bytes. The way the following loop is written, we need 8 bytes of buffer
258 // space if pattern_size >= 4, 11 bytes if pattern_size is 1 or 3, and 10
259 // bytes if pattern_size is 2. Precisely encoding that is probably not
260 // worthwhile; instead, invoke the slow path if we cannot write 11 bytes
261 // (because 11 are required in the worst case).
262 if (SNAPPY_PREDICT_TRUE(op <= buf_limit - 11)) {
263 while (pattern_size < 8) {
264 UnalignedCopy64(src, op);
265 op += pattern_size;
266 pattern_size *= 2;
267 }
268 if (SNAPPY_PREDICT_TRUE(op >= op_limit)) return op_limit;
269 } else {
270 return IncrementalCopySlow(src, op, op_limit);
271 }
272#endif // SNAPPY_HAVE_SSSE3
273 }
274 assert(pattern_size >= 8);
275
276 // Copy 2x 8 bytes at a time. Because op - src can be < 16, a single
277 // UnalignedCopy128 might overwrite data in op. UnalignedCopy64 is safe
278 // because expanding the pattern to at least 8 bytes guarantees that
279 // op - src >= 8.
280 //
281 // Typically, the op_limit is the gating factor so try to simplify the loop
282 // based on that.
283 if (SNAPPY_PREDICT_TRUE(op_limit <= buf_limit - 16)) {
284 // Factor the displacement from op to the source into a variable. This helps
285 // simplify the loop below by only varying the op pointer which we need to
286 // test for the end. Note that this was done after carefully examining the
287 // generated code to allow the addressing modes in the loop below to
288 // maximize micro-op fusion where possible on modern Intel processors. The
289 // generated code should be checked carefully for new processors or with
290 // major changes to the compiler.
291 // TODO: Simplify this code when the compiler reliably produces
292 // the correct x86 instruction sequence.
293 ptrdiff_t op_to_src = src - op;
294
295 // The trip count of this loop is not large and so unrolling will only hurt
296 // code size without helping performance.
297 //
298 // TODO: Replace with loop trip count hint.
299#ifdef __clang__
300#pragma clang loop unroll(disable)
301#endif
302 do {
303 UnalignedCopy64(op + op_to_src, op);
304 UnalignedCopy64(op + op_to_src + 8, op + 8);
305 op += 16;
306 } while (op < op_limit);
307 return op_limit;
308 }
309
310 // Fall back to doing as much as we can with the available slop in the
311 // buffer. This code path is relatively cold however so we save code size by
312 // avoiding unrolling and vectorizing.
313 //
  // TODO: Remove pragma when cold regions don't get vectorized or unrolled.
316#ifdef __clang__
317#pragma clang loop unroll(disable)
318#endif
319 for (char *op_end = buf_limit - 16; op < op_end; op += 16, src += 16) {
320 UnalignedCopy64(src, op);
321 UnalignedCopy64(src + 8, op + 8);
322 }
323 if (op >= op_limit)
324 return op_limit;
325
326 // We only take this branch if we didn't have enough slop and we can do a
327 // single 8 byte copy.
328 if (SNAPPY_PREDICT_FALSE(op <= buf_limit - 8)) {
329 UnalignedCopy64(src, op);
330 src += 8;
331 op += 8;
332 }
333 return IncrementalCopySlow(src, op, op_limit);
334}
335
336} // namespace
337
338template <bool allow_fast_path>
339static inline char* EmitLiteral(char* op,
340 const char* literal,
341 int len) {
342 // The vast majority of copies are below 16 bytes, for which a
343 // call to memcpy is overkill. This fast path can sometimes
344 // copy up to 15 bytes too much, but that is okay in the
345 // main loop, since we have a bit to go on for both sides:
346 //
347 // - The input will always have kInputMarginBytes = 15 extra
348 // available bytes, as long as we're in the main loop, and
349 // if not, allow_fast_path = false.
350 // - The output will always have 32 spare bytes (see
351 // MaxCompressedLength).
352 assert(len > 0); // Zero-length literals are disallowed
353 int n = len - 1;
354 if (allow_fast_path && len <= 16) {
355 // Fits in tag byte
356 *op++ = LITERAL | (n << 2);
357
358 UnalignedCopy128(literal, op);
359 return op + len;
360 }
361
362 if (n < 60) {
363 // Fits in tag byte
364 *op++ = LITERAL | (n << 2);
365 } else {
366 int count = (Bits::Log2Floor(n) >> 3) + 1;
367 assert(count >= 1);
368 assert(count <= 4);
369 *op++ = LITERAL | ((59 + count) << 2);
370 // Encode in upcoming bytes.
    // Write 4 bytes, though we may care about only 1 of them. The output
    // buffer is guaranteed to have at least 3 more bytes of space left, as
    // 'len >= 61' holds here and there is a memcpy of size 'len' below.
374 LittleEndian::Store32(op, n);
375 op += count;
376 }
377 memcpy(op, literal, len);
378 return op + len;
379}
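
// Encoding examples (illustrative, derived from the code above; LITERAL is tag
// type 0 in the snappy format):
//  - A 5-byte literal fits in the tag byte: LITERAL | ((5 - 1) << 2) == 0x10,
//    followed by the 5 literal bytes.
//  - A 100-byte literal needs count == 1 extra length byte: the tag byte is
//    LITERAL | ((59 + 1) << 2) == 0xf0, followed by the byte 99 (i.e. len - 1)
//    and then the 100 literal bytes.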
380
381template <bool len_less_than_12>
382static inline char* EmitCopyAtMost64(char* op, size_t offset, size_t len) {
383 assert(len <= 64);
384 assert(len >= 4);
385 assert(offset < 65536);
386 assert(len_less_than_12 == (len < 12));
387
388 if (len_less_than_12 && SNAPPY_PREDICT_TRUE(offset < 2048)) {
389 // offset fits in 11 bits. The 3 highest go in the top of the first byte,
390 // and the rest go in the second byte.
391 *op++ = COPY_1_BYTE_OFFSET + ((len - 4) << 2) + ((offset >> 3) & 0xe0);
392 *op++ = offset & 0xff;
393 } else {
394 // Write 4 bytes, though we only care about 3 of them. The output buffer
395 // is required to have some slack, so the extra byte won't overrun it.
396 uint32 u = COPY_2_BYTE_OFFSET + ((len - 1) << 2) + (offset << 8);
397 LittleEndian::Store32(op, u);
398 op += 3;
399 }
400 return op;
401}
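
// Encoding examples (illustrative): a length-7 copy at offset 100 uses the
// two-byte COPY_1_BYTE_OFFSET form, since 7 < 12 and 100 < 2048. A length-40
// copy at offset 5000 uses the three-byte COPY_2_BYTE_OFFSET form: the tag
// byte is COPY_2_BYTE_OFFSET + ((40 - 1) << 2), followed by the offset 5000
// stored little-endian in the next two bytes.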
402
403template <bool len_less_than_12>
404static inline char* EmitCopy(char* op, size_t offset, size_t len) {
405 assert(len_less_than_12 == (len < 12));
406 if (len_less_than_12) {
407 return EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
408 } else {
409 // A special case for len <= 64 might help, but so far measurements suggest
410 // it's in the noise.
411
412 // Emit 64 byte copies but make sure to keep at least four bytes reserved.
413 while (SNAPPY_PREDICT_FALSE(len >= 68)) {
414 op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 64);
415 len -= 64;
416 }
417
418 // One or two copies will now finish the job.
419 if (len > 64) {
420 op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, 60);
421 len -= 60;
422 }
423
424 // Emit remainder.
425 if (len < 12) {
426 op = EmitCopyAtMost64</*len_less_than_12=*/true>(op, offset, len);
427 } else {
428 op = EmitCopyAtMost64</*len_less_than_12=*/false>(op, offset, len);
429 }
430 return op;
431 }
432}
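
// For example (illustrative): a 150-byte match is emitted as three copies of
// lengths 64, 64 and 22. The ">= 68" guard above, together with the 60-byte
// emission for lengths 65-67, ensures the final copy is always at least 4
// bytes long, as required by EmitCopyAtMost64.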
433
434bool GetUncompressedLength(const char* start, size_t n, size_t* result) {
435 uint32 v = 0;
436 const char* limit = start + n;
437 if (Varint::Parse32WithLimit(start, limit, &v) != NULL) {
438 *result = v;
439 return true;
440 } else {
441 return false;
442 }
443}
444
445namespace {
446uint32 CalculateTableSize(uint32 input_size) {
447 assert(kMaxHashTableSize >= 256);
448 if (input_size > kMaxHashTableSize) {
449 return kMaxHashTableSize;
450 }
451 if (input_size < 256) {
452 return 256;
453 }
  // This is equivalent to 1 << Log2Ceiling(input_size), assuming
  // input_size > 1. 2 << Log2Floor(x - 1) is equivalent to
  // 1 << (1 + Log2Floor(x - 1)).
456 return 2u << Bits::Log2Floor(input_size - 1);
457}
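
// For example (illustrative): an input of 100 bytes gets the minimum 256-entry
// table, while an input of 1000 bytes gets 2 << Log2Floor(999) == 1024 entries
// (assuming kMaxHashTableSize permits it), i.e. the smallest power of two that
// is >= the input size.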
458} // namespace
459
460namespace internal {
461WorkingMemory::WorkingMemory(size_t input_size) {
462 const size_t max_fragment_size = std::min(input_size, kBlockSize);
463 const size_t table_size = CalculateTableSize(max_fragment_size);
464 size_ = table_size * sizeof(*table_) + max_fragment_size +
465 MaxCompressedLength(max_fragment_size);
466 mem_ = std::allocator<char>().allocate(size_);
467 table_ = reinterpret_cast<uint16*>(mem_);
468 input_ = mem_ + table_size * sizeof(*table_);
469 output_ = input_ + max_fragment_size;
470}
471
472WorkingMemory::~WorkingMemory() {
473 std::allocator<char>().deallocate(mem_, size_);
474}
475
476uint16* WorkingMemory::GetHashTable(size_t fragment_size,
477 int* table_size) const {
478 const size_t htsize = CalculateTableSize(fragment_size);
479 memset(table_, 0, htsize * sizeof(*table_));
480 *table_size = htsize;
481 return table_;
482}
483} // end namespace internal
484
485// For 0 <= offset <= 4, GetUint32AtOffset(GetEightBytesAt(p), offset) will
486// equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
487// empirically found that overlapping loads such as
488// UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
489// are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32.
490//
491// We have different versions for 64- and 32-bit; ideally we would avoid the
492// two functions and just inline the UNALIGNED_LOAD64 call into
// GetUint32AtOffset, but GCC (at least as of 4.6) is seemingly not clever
// enough to avoid loading the value multiple times. For 64-bit, the load
495// is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
496// done at GetUint32AtOffset() time.
497
498#ifdef ARCH_K8
499
500typedef uint64 EightBytesReference;
501
502static inline EightBytesReference GetEightBytesAt(const char* ptr) {
503 return UNALIGNED_LOAD64(ptr);
504}
505
506static inline uint32 GetUint32AtOffset(uint64 v, int offset) {
507 assert(offset >= 0);
508 assert(offset <= 4);
509 return v >> (LittleEndian::IsLittleEndian() ? 8 * offset : 32 - 8 * offset);
510}
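
// For example (illustrative): on a little-endian machine, with
// v = GetEightBytesAt(p), GetUint32AtOffset(v, 1) is (v >> 8) truncated to 32
// bits, which is exactly the four bytes p[1..4], i.e. UNALIGNED_LOAD32(p + 1),
// without issuing a second, overlapping load.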
511
512#else
513
514typedef const char* EightBytesReference;
515
516static inline EightBytesReference GetEightBytesAt(const char* ptr) {
517 return ptr;
518}
519
520static inline uint32 GetUint32AtOffset(const char* v, int offset) {
521 assert(offset >= 0);
522 assert(offset <= 4);
523 return UNALIGNED_LOAD32(v + offset);
524}
525
526#endif
527
528// Flat array compression that does not emit the "uncompressed length"
529// prefix. Compresses "input" string to the "*op" buffer.
530//
531// REQUIRES: "input" is at most "kBlockSize" bytes long.
532// REQUIRES: "op" points to an array of memory that is at least
533// "MaxCompressedLength(input.size())" in size.
534// REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
535// REQUIRES: "table_size" is a power of two
536//
537// Returns an "end" pointer into "op" buffer.
538// "end - op" is the compressed size of "input".
539namespace internal {
540char* CompressFragment(const char* input,
541 size_t input_size,
542 char* op,
543 uint16* table,
544 const int table_size) {
545 // "ip" is the input pointer, and "op" is the output pointer.
546 const char* ip = input;
547 assert(input_size <= kBlockSize);
548 assert((table_size & (table_size - 1)) == 0); // table must be power of two
549 const int shift = 32 - Bits::Log2Floor(table_size);
550 assert(static_cast<int>(kuint32max >> shift) == table_size - 1);
551 const char* ip_end = input + input_size;
552 const char* base_ip = ip;
553 // Bytes in [next_emit, ip) will be emitted as literal bytes. Or
554 // [next_emit, ip_end) after the main loop.
555 const char* next_emit = ip;
556
557 const size_t kInputMarginBytes = 15;
558 if (SNAPPY_PREDICT_TRUE(input_size >= kInputMarginBytes)) {
559 const char* ip_limit = input + input_size - kInputMarginBytes;
560
561 for (uint32 next_hash = Hash(++ip, shift); ; ) {
562 assert(next_emit < ip);
563 // The body of this loop calls EmitLiteral once and then EmitCopy one or
564 // more times. (The exception is that when we're close to exhausting
565 // the input we goto emit_remainder.)
566 //
567 // In the first iteration of this loop we're just starting, so
568 // there's nothing to copy, so calling EmitLiteral once is
569 // necessary. And we only start a new iteration when the
570 // current iteration has determined that a call to EmitLiteral will
571 // precede the next call to EmitCopy (if any).
572 //
573 // Step 1: Scan forward in the input looking for a 4-byte-long match.
574 // If we get close to exhausting the input then goto emit_remainder.
575 //
576 // Heuristic match skipping: If 32 bytes are scanned with no matches
577 // found, start looking only at every other byte. If 32 more bytes are
578 // scanned (or skipped), look at every third byte, etc.. When a match is
579 // found, immediately go back to looking at every byte. This is a small
580 // loss (~5% performance, ~0.1% density) for compressible data due to more
581 // bookkeeping, but for non-compressible data (such as JPEG) it's a huge
582 // win since the compressor quickly "realizes" the data is incompressible
583 // and doesn't bother looking for matches everywhere.
584 //
585 // The "skip" variable keeps track of how many bytes there are since the
      // last match; dividing it by 32 (i.e., right-shifting by five) gives the
587 // number of bytes to move ahead for each iteration.
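      //
      // For example (illustrative): "skip" starts at 32, so the step
      // (skip >> 5) stays at 1 for the first 32 probes, becomes 2 once skip
      // reaches 64, 3 once it reaches 96, and so on; the distance scanned
      // grows quadratically in the number of probes, so long incompressible
      // runs are stepped over quickly.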
588 uint32 skip = 32;
589
590 const char* next_ip = ip;
591 const char* candidate;
592 do {
593 ip = next_ip;
594 uint32 hash = next_hash;
595 assert(hash == Hash(ip, shift));
596 uint32 bytes_between_hash_lookups = skip >> 5;
597 skip += bytes_between_hash_lookups;
598 next_ip = ip + bytes_between_hash_lookups;
599 if (SNAPPY_PREDICT_FALSE(next_ip > ip_limit)) {
600 goto emit_remainder;
601 }
602 next_hash = Hash(next_ip, shift);
603 candidate = base_ip + table[hash];
604 assert(candidate >= base_ip);
605 assert(candidate < ip);
606
607 table[hash] = ip - base_ip;
608 } while (SNAPPY_PREDICT_TRUE(UNALIGNED_LOAD32(ip) !=
609 UNALIGNED_LOAD32(candidate)));
610
611 // Step 2: A 4-byte match has been found. We'll later see if more
612 // than 4 bytes match. But, prior to the match, input
613 // bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
614 assert(next_emit + 16 <= ip_end);
615 op = EmitLiteral</*allow_fast_path=*/true>(op, next_emit, ip - next_emit);
616
617 // Step 3: Call EmitCopy, and then see if another EmitCopy could
618 // be our next move. Repeat until we find no match for the
619 // input immediately after what was consumed by the last EmitCopy call.
620 //
621 // If we exit this loop normally then we need to call EmitLiteral next,
622 // though we don't yet know how big the literal will be. We handle that
623 // by proceeding to the next iteration of the main loop. We also can exit
624 // this loop via goto if we get close to exhausting the input.
625 EightBytesReference input_bytes;
626 uint32 candidate_bytes = 0;
627
628 do {
629 // We have a 4-byte match at ip, and no need to emit any
630 // "literal bytes" prior to ip.
631 const char* base = ip;
632 std::pair<size_t, bool> p =
633 FindMatchLength(candidate + 4, ip + 4, ip_end);
634 size_t matched = 4 + p.first;
635 ip += matched;
636 size_t offset = base - candidate;
637 assert(0 == memcmp(base, candidate, matched));
638 if (p.second) {
639 op = EmitCopy</*len_less_than_12=*/true>(op, offset, matched);
640 } else {
641 op = EmitCopy</*len_less_than_12=*/false>(op, offset, matched);
642 }
643 next_emit = ip;
644 if (SNAPPY_PREDICT_FALSE(ip >= ip_limit)) {
645 goto emit_remainder;
646 }
647 // We are now looking for a 4-byte match again. We read
648 // table[Hash(ip, shift)] for that. To improve compression,
649 // we also update table[Hash(ip - 1, shift)] and table[Hash(ip, shift)].
650 input_bytes = GetEightBytesAt(ip - 1);
651 uint32 prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
652 table[prev_hash] = ip - base_ip - 1;
653 uint32 cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
654 candidate = base_ip + table[cur_hash];
655 candidate_bytes = UNALIGNED_LOAD32(candidate);
656 table[cur_hash] = ip - base_ip;
657 } while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);
658
659 next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
660 ++ip;
661 }
662 }
663
664 emit_remainder:
665 // Emit the remaining bytes as a literal
666 if (next_emit < ip_end) {
667 op = EmitLiteral</*allow_fast_path=*/false>(op, next_emit,
668 ip_end - next_emit);
669 }
670
671 return op;
672}
673} // end namespace internal
674
// Called back at every compression call to trace parameters and sizes.
676static inline void Report(const char *algorithm, size_t compressed_size,
677 size_t uncompressed_size) {}
678
679// Signature of output types needed by decompression code.
680// The decompression code is templatized on a type that obeys this
681// signature so that we do not pay virtual function call overhead in
682// the middle of a tight decompression loop.
683//
684// class DecompressionWriter {
685// public:
686// // Called before decompression
687// void SetExpectedLength(size_t length);
688//
689// // Called after decompression
690// bool CheckLength() const;
691//
692// // Called repeatedly during decompression
693// bool Append(const char* ip, size_t length);
694// bool AppendFromSelf(uint32 offset, size_t length);
695//
696// // The rules for how TryFastAppend differs from Append are somewhat
697// // convoluted:
698// //
699// // - TryFastAppend is allowed to decline (return false) at any
700// // time, for any reason -- just "return false" would be
701// // a perfectly legal implementation of TryFastAppend.
702// // The intention is for TryFastAppend to allow a fast path
703// // in the common case of a small append.
704// // - TryFastAppend is allowed to read up to <available> bytes
705// // from the input buffer, whereas Append is allowed to read
706// // <length>. However, if it returns true, it must leave
707// // at least five (kMaximumTagLength) bytes in the input buffer
708// // afterwards, so that there is always enough space to read the
709// // next tag without checking for a refill.
710// // - TryFastAppend must always return decline (return false)
711// // if <length> is 61 or more, as in this case the literal length is not
712// // decoded fully. In practice, this should not be a big problem,
713// // as it is unlikely that one would implement a fast path accepting
714// // this much data.
715// //
716// bool TryFastAppend(const char* ip, size_t available, size_t length);
717// };
718
719static inline uint32 ExtractLowBytes(uint32 v, int n) {
720 assert(n >= 0);
721 assert(n <= 4);
722#if SNAPPY_HAVE_BMI2
723 return _bzhi_u32(v, 8 * n);
724#else
725 // This needs to be wider than uint32 otherwise `mask << 32` will be
726 // undefined.
727 uint64 mask = 0xffffffff;
728 return v & ~(mask << (8 * n));
729#endif
730}
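
// For example (illustrative): ExtractLowBytes(0x12345678, 2) == 0x5678. With
// BMI2 this is a single BZHI instruction zeroing bit 16 and above; the
// portable fallback computes 0x12345678 & ~(0xffffffffULL << 16).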
731
732static inline bool LeftShiftOverflows(uint8 value, uint32 shift) {
733 assert(shift < 32);
734 static const uint8 masks[] = {
735 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
736 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
737 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, //
738 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe};
739 return (value & masks[shift]) != 0;
740}
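
// For example (illustrative): LeftShiftOverflows(0x80, 25) is true because
// 0x80 << 25 needs 33 bits, while LeftShiftOverflows(0x7f, 25) is false. The
// masks table simply records, for each shift amount, which bits of the value
// would be shifted out of a 32-bit word.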
741
742// Helper class for decompression
743class SnappyDecompressor {
744 private:
745 Source* reader_; // Underlying source of bytes to decompress
746 const char* ip_; // Points to next buffered byte
747 const char* ip_limit_; // Points just past buffered bytes
748 uint32 peeked_; // Bytes peeked from reader (need to skip)
749 bool eof_; // Hit end of input without an error?
750 char scratch_[kMaximumTagLength]; // See RefillTag().
751
752 // Ensure that all of the tag metadata for the next tag is available
753 // in [ip_..ip_limit_-1]. Also ensures that [ip,ip+4] is readable even
754 // if (ip_limit_ - ip_ < 5).
755 //
756 // Returns true on success, false on error or end of input.
757 bool RefillTag();
758
759 public:
760 explicit SnappyDecompressor(Source* reader)
761 : reader_(reader),
762 ip_(NULL),
763 ip_limit_(NULL),
764 peeked_(0),
765 eof_(false) {
766 }
767
768 ~SnappyDecompressor() {
769 // Advance past any bytes we peeked at from the reader
770 reader_->Skip(peeked_);
771 }
772
773 // Returns true iff we have hit the end of the input without an error.
774 bool eof() const {
775 return eof_;
776 }
777
778 // Read the uncompressed length stored at the start of the compressed data.
779 // On success, stores the length in *result and returns true.
780 // On failure, returns false.
781 bool ReadUncompressedLength(uint32* result) {
782 assert(ip_ == NULL); // Must not have read anything yet
783 // Length is encoded in 1..5 bytes
784 *result = 0;
785 uint32 shift = 0;
786 while (true) {
787 if (shift >= 32) return false;
788 size_t n;
789 const char* ip = reader_->Peek(&n);
790 if (n == 0) return false;
791 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
792 reader_->Skip(1);
793 uint32 val = c & 0x7f;
794 if (LeftShiftOverflows(static_cast<uint8>(val), shift)) return false;
795 *result |= val << shift;
796 if (c < 128) {
797 break;
798 }
799 shift += 7;
800 }
801 return true;
802 }
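
  // For example (illustrative): the varint bytes 0xE2 0x02 decode to
  // (0xE2 & 0x7f) | (0x02 << 7) == 98 + 256 == 354. The high bit of each byte
  // marks a continuation, so a 32-bit length takes at most five bytes.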
803
804 // Process the next item found in the input.
805 // Returns true if successful, false on error or end of input.
806 template <class Writer>
807#if defined(__GNUC__) && defined(__x86_64__)
808 __attribute__((aligned(32)))
809#endif
810 void DecompressAllTags(Writer* writer) {
811 // In x86, pad the function body to start 16 bytes later. This function has
812 // a couple of hotspots that are highly sensitive to alignment: we have
813 // observed regressions by more than 20% in some metrics just by moving the
814 // exact same code to a different position in the benchmark binary.
815 //
816 // Putting this code on a 32-byte-aligned boundary + 16 bytes makes us hit
817 // the "lucky" case consistently. Unfortunately, this is a very brittle
818 // workaround, and future differences in code generation may reintroduce
819 // this regression. If you experience a big, difficult to explain, benchmark
820 // performance regression here, first try removing this hack.
821#if defined(__GNUC__) && defined(__x86_64__)
822 // Two 8-byte "NOP DWORD ptr [EAX + EAX*1 + 00000000H]" instructions.
823 asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
824 asm(".byte 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00");
825#endif
826
827 const char* ip = ip_;
828 // We could have put this refill fragment only at the beginning of the loop.
829 // However, duplicating it at the end of each branch gives the compiler more
830 // scope to optimize the <ip_limit_ - ip> expression based on the local
831 // context, which overall increases speed.
832 #define MAYBE_REFILL() \
833 if (ip_limit_ - ip < kMaximumTagLength) { \
834 ip_ = ip; \
835 if (!RefillTag()) return; \
836 ip = ip_; \
837 }
838
839 MAYBE_REFILL();
840 for ( ;; ) {
841 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip++));
842
843 // Ratio of iterations that have LITERAL vs non-LITERAL for different
844 // inputs.
845 //
846 // input LITERAL NON_LITERAL
847 // -----------------------------------
848 // html|html4|cp 23% 77%
849 // urls 36% 64%
850 // jpg 47% 53%
851 // pdf 19% 81%
852 // txt[1-4] 25% 75%
853 // pb 24% 76%
854 // bin 24% 76%
855 if (SNAPPY_PREDICT_FALSE((c & 0x3) == LITERAL)) {
856 size_t literal_length = (c >> 2) + 1u;
857 if (writer->TryFastAppend(ip, ip_limit_ - ip, literal_length)) {
858 assert(literal_length < 61);
859 ip += literal_length;
860 // NOTE: There is no MAYBE_REFILL() here, as TryFastAppend()
861 // will not return true unless there's already at least five spare
862 // bytes in addition to the literal.
863 continue;
864 }
865 if (SNAPPY_PREDICT_FALSE(literal_length >= 61)) {
866 // Long literal.
867 const size_t literal_length_length = literal_length - 60;
868 literal_length =
869 ExtractLowBytes(LittleEndian::Load32(ip), literal_length_length) +
870 1;
871 ip += literal_length_length;
872 }
873
874 size_t avail = ip_limit_ - ip;
875 while (avail < literal_length) {
876 if (!writer->Append(ip, avail)) return;
877 literal_length -= avail;
878 reader_->Skip(peeked_);
879 size_t n;
880 ip = reader_->Peek(&n);
881 avail = n;
882 peeked_ = avail;
883 if (avail == 0) return; // Premature end of input
884 ip_limit_ = ip + avail;
885 }
886 if (!writer->Append(ip, literal_length)) {
887 return;
888 }
889 ip += literal_length;
890 MAYBE_REFILL();
891 } else {
892 const size_t entry = char_table[c];
893 const size_t trailer =
894 ExtractLowBytes(LittleEndian::Load32(ip), entry >> 11);
895 const size_t length = entry & 0xff;
896 ip += entry >> 11;
897
898 // copy_offset/256 is encoded in bits 8..10. By just fetching
899 // those bits, we get copy_offset (since the bit-field starts at
900 // bit 8).
901 const size_t copy_offset = entry & 0x700;
902 if (!writer->AppendFromSelf(copy_offset + trailer, length)) {
903 return;
904 }
905 MAYBE_REFILL();
906 }
907 }
908
909#undef MAYBE_REFILL
910 }
911};
912
913bool SnappyDecompressor::RefillTag() {
914 const char* ip = ip_;
915 if (ip == ip_limit_) {
916 // Fetch a new fragment from the reader
917 reader_->Skip(peeked_); // All peeked bytes are used up
918 size_t n;
919 ip = reader_->Peek(&n);
920 peeked_ = n;
921 eof_ = (n == 0);
922 if (eof_) return false;
923 ip_limit_ = ip + n;
924 }
925
926 // Read the tag character
927 assert(ip < ip_limit_);
928 const unsigned char c = *(reinterpret_cast<const unsigned char*>(ip));
929 const uint32 entry = char_table[c];
930 const uint32 needed = (entry >> 11) + 1; // +1 byte for 'c'
931 assert(needed <= sizeof(scratch_));
932
933 // Read more bytes from reader if needed
934 uint32 nbuf = ip_limit_ - ip;
935 if (nbuf < needed) {
936 // Stitch together bytes from ip and reader to form the word
937 // contents. We store the needed bytes in "scratch_". They
938 // will be consumed immediately by the caller since we do not
939 // read more than we need.
940 memmove(scratch_, ip, nbuf);
941 reader_->Skip(peeked_); // All peeked bytes are used up
942 peeked_ = 0;
943 while (nbuf < needed) {
944 size_t length;
945 const char* src = reader_->Peek(&length);
946 if (length == 0) return false;
947 uint32 to_add = std::min<uint32>(needed - nbuf, length);
948 memcpy(scratch_ + nbuf, src, to_add);
949 nbuf += to_add;
950 reader_->Skip(to_add);
951 }
952 assert(nbuf == needed);
953 ip_ = scratch_;
954 ip_limit_ = scratch_ + needed;
955 } else if (nbuf < kMaximumTagLength) {
956 // Have enough bytes, but move into scratch_ so that we do not
957 // read past end of input
958 memmove(scratch_, ip, nbuf);
959 reader_->Skip(peeked_); // All peeked bytes are used up
960 peeked_ = 0;
961 ip_ = scratch_;
962 ip_limit_ = scratch_ + nbuf;
963 } else {
964 // Pass pointer to buffer returned by reader_.
965 ip_ = ip;
966 }
967 return true;
968}
969
970template <typename Writer>
971static bool InternalUncompress(Source* r, Writer* writer) {
972 // Read the uncompressed length from the front of the compressed input
973 SnappyDecompressor decompressor(r);
974 uint32 uncompressed_len = 0;
975 if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
976
977 return InternalUncompressAllTags(&decompressor, writer, r->Available(),
978 uncompressed_len);
979}
980
981template <typename Writer>
982static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
983 Writer* writer,
984 uint32 compressed_len,
985 uint32 uncompressed_len) {
986 Report("snappy_uncompress", compressed_len, uncompressed_len);
987
988 writer->SetExpectedLength(uncompressed_len);
989
990 // Process the entire input
991 decompressor->DecompressAllTags(writer);
992 writer->Flush();
993 return (decompressor->eof() && writer->CheckLength());
994}
995
996bool GetUncompressedLength(Source* source, uint32* result) {
997 SnappyDecompressor decompressor(source);
998 return decompressor.ReadUncompressedLength(result);
999}
1000
1001size_t Compress(Source* reader, Sink* writer) {
1002 size_t written = 0;
1003 size_t N = reader->Available();
1004 const size_t uncompressed_size = N;
1005 char ulength[Varint::kMax32];
1006 char* p = Varint::Encode32(ulength, N);
1007 writer->Append(ulength, p-ulength);
1008 written += (p - ulength);
1009
1010 internal::WorkingMemory wmem(N);
1011
1012 while (N > 0) {
1013 // Get next block to compress (without copying if possible)
1014 size_t fragment_size;
1015 const char* fragment = reader->Peek(&fragment_size);
1016 assert(fragment_size != 0); // premature end of input
1017 const size_t num_to_read = std::min(N, kBlockSize);
1018 size_t bytes_read = fragment_size;
1019
1020 size_t pending_advance = 0;
1021 if (bytes_read >= num_to_read) {
1022 // Buffer returned by reader is large enough
1023 pending_advance = num_to_read;
1024 fragment_size = num_to_read;
1025 } else {
1026 char* scratch = wmem.GetScratchInput();
1027 memcpy(scratch, fragment, bytes_read);
1028 reader->Skip(bytes_read);
1029
1030 while (bytes_read < num_to_read) {
1031 fragment = reader->Peek(&fragment_size);
1032 size_t n = std::min<size_t>(fragment_size, num_to_read - bytes_read);
1033 memcpy(scratch + bytes_read, fragment, n);
1034 bytes_read += n;
1035 reader->Skip(n);
1036 }
1037 assert(bytes_read == num_to_read);
1038 fragment = scratch;
1039 fragment_size = num_to_read;
1040 }
1041 assert(fragment_size == num_to_read);
1042
1043 // Get encoding table for compression
1044 int table_size;
1045 uint16* table = wmem.GetHashTable(num_to_read, &table_size);
1046
1047 // Compress input_fragment and append to dest
1048 const int max_output = MaxCompressedLength(num_to_read);
1049
1050 // Need a scratch buffer for the output, in case the byte sink doesn't
1051 // have room for us directly.
1052
1053 // Since we encode kBlockSize regions followed by a region
1054 // which is <= kBlockSize in length, a previously allocated
1055 // scratch_output[] region is big enough for this iteration.
1056 char* dest = writer->GetAppendBuffer(max_output, wmem.GetScratchOutput());
1057 char* end = internal::CompressFragment(fragment, fragment_size, dest, table,
1058 table_size);
1059 writer->Append(dest, end - dest);
1060 written += (end - dest);
1061
1062 N -= num_to_read;
1063 reader->Skip(pending_advance);
1064 }
1065
1066 Report("snappy_compress", written, uncompressed_size);
1067
1068 return written;
1069}
1070
1071// -----------------------------------------------------------------------
1072// IOVec interfaces
1073// -----------------------------------------------------------------------
1074
1075// A type that writes to an iovec.
1076// Note that this is not a "ByteSink", but a type that matches the
1077// Writer template argument to SnappyDecompressor::DecompressAllTags().
1078class SnappyIOVecWriter {
1079 private:
1080 // output_iov_end_ is set to iov + count and used to determine when
1081 // the end of the iovs is reached.
1082 const struct iovec* output_iov_end_;
1083
1084#if !defined(NDEBUG)
1085 const struct iovec* output_iov_;
1086#endif // !defined(NDEBUG)
1087
1088 // Current iov that is being written into.
1089 const struct iovec* curr_iov_;
1090
1091 // Pointer to current iov's write location.
1092 char* curr_iov_output_;
1093
1094 // Remaining bytes to write into curr_iov_output.
1095 size_t curr_iov_remaining_;
1096
1097 // Total bytes decompressed into output_iov_ so far.
1098 size_t total_written_;
1099
1100 // Maximum number of bytes that will be decompressed into output_iov_.
1101 size_t output_limit_;
1102
1103 static inline char* GetIOVecPointer(const struct iovec* iov, size_t offset) {
1104 return reinterpret_cast<char*>(iov->iov_base) + offset;
1105 }
1106
1107 public:
1108 // Does not take ownership of iov. iov must be valid during the
1109 // entire lifetime of the SnappyIOVecWriter.
1110 inline SnappyIOVecWriter(const struct iovec* iov, size_t iov_count)
1111 : output_iov_end_(iov + iov_count),
1112#if !defined(NDEBUG)
1113 output_iov_(iov),
1114#endif // !defined(NDEBUG)
1115 curr_iov_(iov),
1116 curr_iov_output_(iov_count ? reinterpret_cast<char*>(iov->iov_base)
1117 : nullptr),
1118 curr_iov_remaining_(iov_count ? iov->iov_len : 0),
1119 total_written_(0),
1120 output_limit_(-1) {}
1121
1122 inline void SetExpectedLength(size_t len) {
1123 output_limit_ = len;
1124 }
1125
1126 inline bool CheckLength() const {
1127 return total_written_ == output_limit_;
1128 }
1129
1130 inline bool Append(const char* ip, size_t len) {
1131 if (total_written_ + len > output_limit_) {
1132 return false;
1133 }
1134
1135 return AppendNoCheck(ip, len);
1136 }
1137
1138 inline bool AppendNoCheck(const char* ip, size_t len) {
1139 while (len > 0) {
1140 if (curr_iov_remaining_ == 0) {
1141 // This iovec is full. Go to the next one.
1142 if (curr_iov_ + 1 >= output_iov_end_) {
1143 return false;
1144 }
1145 ++curr_iov_;
1146 curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1147 curr_iov_remaining_ = curr_iov_->iov_len;
1148 }
1149
1150 const size_t to_write = std::min(len, curr_iov_remaining_);
1151 memcpy(curr_iov_output_, ip, to_write);
1152 curr_iov_output_ += to_write;
1153 curr_iov_remaining_ -= to_write;
1154 total_written_ += to_write;
1155 ip += to_write;
1156 len -= to_write;
1157 }
1158
1159 return true;
1160 }
1161
1162 inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1163 const size_t space_left = output_limit_ - total_written_;
1164 if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16 &&
1165 curr_iov_remaining_ >= 16) {
1166 // Fast path, used for the majority (about 95%) of invocations.
1167 UnalignedCopy128(ip, curr_iov_output_);
1168 curr_iov_output_ += len;
1169 curr_iov_remaining_ -= len;
1170 total_written_ += len;
1171 return true;
1172 }
1173
1174 return false;
1175 }
1176
1177 inline bool AppendFromSelf(size_t offset, size_t len) {
1178 // See SnappyArrayWriter::AppendFromSelf for an explanation of
1179 // the "offset - 1u" trick.
1180 if (offset - 1u >= total_written_) {
1181 return false;
1182 }
1183 const size_t space_left = output_limit_ - total_written_;
1184 if (len > space_left) {
1185 return false;
1186 }
1187
1188 // Locate the iovec from which we need to start the copy.
1189 const iovec* from_iov = curr_iov_;
1190 size_t from_iov_offset = curr_iov_->iov_len - curr_iov_remaining_;
1191 while (offset > 0) {
1192 if (from_iov_offset >= offset) {
1193 from_iov_offset -= offset;
1194 break;
1195 }
1196
1197 offset -= from_iov_offset;
1198 --from_iov;
1199#if !defined(NDEBUG)
1200 assert(from_iov >= output_iov_);
1201#endif // !defined(NDEBUG)
1202 from_iov_offset = from_iov->iov_len;
1203 }
1204
    // Copy <len> bytes starting from the iovec pointed to by from_iov to the
    // current iovec.
1207 while (len > 0) {
1208 assert(from_iov <= curr_iov_);
1209 if (from_iov != curr_iov_) {
        const size_t to_copy =
            std::min<size_t>(from_iov->iov_len - from_iov_offset, len);
1212 AppendNoCheck(GetIOVecPointer(from_iov, from_iov_offset), to_copy);
1213 len -= to_copy;
1214 if (len > 0) {
1215 ++from_iov;
1216 from_iov_offset = 0;
1217 }
1218 } else {
1219 size_t to_copy = curr_iov_remaining_;
1220 if (to_copy == 0) {
1221 // This iovec is full. Go to the next one.
1222 if (curr_iov_ + 1 >= output_iov_end_) {
1223 return false;
1224 }
1225 ++curr_iov_;
1226 curr_iov_output_ = reinterpret_cast<char*>(curr_iov_->iov_base);
1227 curr_iov_remaining_ = curr_iov_->iov_len;
1228 continue;
1229 }
1230 if (to_copy > len) {
1231 to_copy = len;
1232 }
1233
1234 IncrementalCopy(GetIOVecPointer(from_iov, from_iov_offset),
1235 curr_iov_output_, curr_iov_output_ + to_copy,
1236 curr_iov_output_ + curr_iov_remaining_);
1237 curr_iov_output_ += to_copy;
1238 curr_iov_remaining_ -= to_copy;
1239 from_iov_offset += to_copy;
1240 total_written_ += to_copy;
1241 len -= to_copy;
1242 }
1243 }
1244
1245 return true;
1246 }
1247
1248 inline void Flush() {}
1249};
1250
1251bool RawUncompressToIOVec(const char* compressed, size_t compressed_length,
1252 const struct iovec* iov, size_t iov_cnt) {
1253 ByteArraySource reader(compressed, compressed_length);
1254 return RawUncompressToIOVec(&reader, iov, iov_cnt);
1255}
1256
1257bool RawUncompressToIOVec(Source* compressed, const struct iovec* iov,
1258 size_t iov_cnt) {
1259 SnappyIOVecWriter output(iov, iov_cnt);
1260 return InternalUncompress(compressed, &output);
1261}
1262
1263// -----------------------------------------------------------------------
1264// Flat array interfaces
1265// -----------------------------------------------------------------------
1266
1267// A type that writes to a flat array.
1268// Note that this is not a "ByteSink", but a type that matches the
1269// Writer template argument to SnappyDecompressor::DecompressAllTags().
1270class SnappyArrayWriter {
1271 private:
1272 char* base_;
1273 char* op_;
1274 char* op_limit_;
1275
1276 public:
1277 inline explicit SnappyArrayWriter(char* dst)
1278 : base_(dst),
1279 op_(dst),
1280 op_limit_(dst) {
1281 }
1282
1283 inline void SetExpectedLength(size_t len) {
1284 op_limit_ = op_ + len;
1285 }
1286
1287 inline bool CheckLength() const {
1288 return op_ == op_limit_;
1289 }
1290
1291 inline bool Append(const char* ip, size_t len) {
1292 char* op = op_;
1293 const size_t space_left = op_limit_ - op;
1294 if (space_left < len) {
1295 return false;
1296 }
1297 memcpy(op, ip, len);
1298 op_ = op + len;
1299 return true;
1300 }
1301
1302 inline bool TryFastAppend(const char* ip, size_t available, size_t len) {
1303 char* op = op_;
1304 const size_t space_left = op_limit_ - op;
1305 if (len <= 16 && available >= 16 + kMaximumTagLength && space_left >= 16) {
1306 // Fast path, used for the majority (about 95%) of invocations.
1307 UnalignedCopy128(ip, op);
1308 op_ = op + len;
1309 return true;
1310 } else {
1311 return false;
1312 }
1313 }
1314
1315 inline bool AppendFromSelf(size_t offset, size_t len) {
1316 char* const op_end = op_ + len;
1317
1318 // Check if we try to append from before the start of the buffer.
1319 // Normally this would just be a check for "produced < offset",
1320 // but "produced <= offset - 1u" is equivalent for every case
1321 // except the one where offset==0, where the right side will wrap around
1322 // to a very big number. This is convenient, as offset==0 is another
1323 // invalid case that we also want to catch, so that we do not go
1324 // into an infinite loop.
1325 if (Produced() <= offset - 1u || op_end > op_limit_) return false;
1326 op_ = IncrementalCopy(op_ - offset, op_, op_end, op_limit_);
1327
1328 return true;
1329 }
1330 inline size_t Produced() const {
1331 assert(op_ >= base_);
1332 return op_ - base_;
1333 }
1334 inline void Flush() {}
1335};
1336
1337bool RawUncompress(const char* compressed, size_t n, char* uncompressed) {
1338 ByteArraySource reader(compressed, n);
1339 return RawUncompress(&reader, uncompressed);
1340}
1341
1342bool RawUncompress(Source* compressed, char* uncompressed) {
1343 SnappyArrayWriter output(uncompressed);
1344 return InternalUncompress(compressed, &output);
1345}
1346
1347bool Uncompress(const char* compressed, size_t n, string* uncompressed) {
1348 size_t ulength;
1349 if (!GetUncompressedLength(compressed, n, &ulength)) {
1350 return false;
1351 }
1352 // On 32-bit builds: max_size() < kuint32max. Check for that instead
1353 // of crashing (e.g., consider externally specified compressed data).
1354 if (ulength > uncompressed->max_size()) {
1355 return false;
1356 }
1357 STLStringResizeUninitialized(uncompressed, ulength);
1358 return RawUncompress(compressed, n, string_as_array(uncompressed));
1359}
1360
1361// A Writer that drops everything on the floor and just does validation
1362class SnappyDecompressionValidator {
1363 private:
1364 size_t expected_;
1365 size_t produced_;
1366
1367 public:
1368 inline SnappyDecompressionValidator() : expected_(0), produced_(0) { }
1369 inline void SetExpectedLength(size_t len) {
1370 expected_ = len;
1371 }
1372 inline bool CheckLength() const {
1373 return expected_ == produced_;
1374 }
1375 inline bool Append(const char* ip, size_t len) {
1376 produced_ += len;
1377 return produced_ <= expected_;
1378 }
1379 inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
1380 return false;
1381 }
1382 inline bool AppendFromSelf(size_t offset, size_t len) {
1383 // See SnappyArrayWriter::AppendFromSelf for an explanation of
1384 // the "offset - 1u" trick.
1385 if (produced_ <= offset - 1u) return false;
1386 produced_ += len;
1387 return produced_ <= expected_;
1388 }
1389 inline void Flush() {}
1390};
1391
1392bool IsValidCompressedBuffer(const char* compressed, size_t n) {
1393 ByteArraySource reader(compressed, n);
1394 SnappyDecompressionValidator writer;
1395 return InternalUncompress(&reader, &writer);
1396}
1397
1398bool IsValidCompressed(Source* compressed) {
1399 SnappyDecompressionValidator writer;
1400 return InternalUncompress(compressed, &writer);
1401}
1402
1403void RawCompress(const char* input,
1404 size_t input_length,
1405 char* compressed,
1406 size_t* compressed_length) {
1407 ByteArraySource reader(input, input_length);
1408 UncheckedByteArraySink writer(compressed);
1409 Compress(&reader, &writer);
1410
1411 // Compute how many bytes were added
1412 *compressed_length = (writer.CurrentDestination() - compressed);
1413}
1414
1415size_t Compress(const char* input, size_t input_length, string* compressed) {
1416 // Pre-grow the buffer to the max length of the compressed output
1417 STLStringResizeUninitialized(compressed, MaxCompressedLength(input_length));
1418
1419 size_t compressed_length;
1420 RawCompress(input, input_length, string_as_array(compressed),
1421 &compressed_length);
1422 compressed->resize(compressed_length);
1423 return compressed_length;
1424}
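
// Typical round trip through the flat-array/string API (illustrative usage
// only, not part of the library):
//
//   std::string original = ..., compressed, restored;
//   snappy::Compress(original.data(), original.size(), &compressed);
//   if (!snappy::Uncompress(compressed.data(), compressed.size(), &restored)) {
//     // handle corrupt input
//   }
//   assert(restored == original);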
1425
1426// -----------------------------------------------------------------------
1427// Sink interface
1428// -----------------------------------------------------------------------
1429
1430// A type that decompresses into a Sink. The template parameter
1431// Allocator must export one method "char* Allocate(int size);", which
1432// allocates a buffer of "size" and appends that to the destination.
1433template <typename Allocator>
1434class SnappyScatteredWriter {
1435 Allocator allocator_;
1436
1437 // We need random access into the data generated so far. Therefore
1438 // we keep track of all of the generated data as an array of blocks.
1439 // All of the blocks except the last have length kBlockSize.
1440 std::vector<char*> blocks_;
1441 size_t expected_;
1442
1443 // Total size of all fully generated blocks so far
1444 size_t full_size_;
1445
1446 // Pointer into current output block
1447 char* op_base_; // Base of output block
1448 char* op_ptr_; // Pointer to next unfilled byte in block
1449 char* op_limit_; // Pointer just past block
1450
1451 inline size_t Size() const {
1452 return full_size_ + (op_ptr_ - op_base_);
1453 }
1454
1455 bool SlowAppend(const char* ip, size_t len);
1456 bool SlowAppendFromSelf(size_t offset, size_t len);
1457
1458 public:
1459 inline explicit SnappyScatteredWriter(const Allocator& allocator)
1460 : allocator_(allocator),
1461 full_size_(0),
1462 op_base_(NULL),
1463 op_ptr_(NULL),
1464 op_limit_(NULL) {
1465 }
1466
1467 inline void SetExpectedLength(size_t len) {
1468 assert(blocks_.empty());
1469 expected_ = len;
1470 }
1471
1472 inline bool CheckLength() const {
1473 return Size() == expected_;
1474 }
1475
1476 // Return the number of bytes actually uncompressed so far
1477 inline size_t Produced() const {
1478 return Size();
1479 }
1480
1481 inline bool Append(const char* ip, size_t len) {
1482 size_t avail = op_limit_ - op_ptr_;
1483 if (len <= avail) {
1484 // Fast path
1485 memcpy(op_ptr_, ip, len);
1486 op_ptr_ += len;
1487 return true;
1488 } else {
1489 return SlowAppend(ip, len);
1490 }
1491 }
1492
1493 inline bool TryFastAppend(const char* ip, size_t available, size_t length) {
1494 char* op = op_ptr_;
1495 const int space_left = op_limit_ - op;
1496 if (length <= 16 && available >= 16 + kMaximumTagLength &&
1497 space_left >= 16) {
1498 // Fast path, used for the majority (about 95%) of invocations.
1499 UnalignedCopy128(ip, op);
1500 op_ptr_ = op + length;
1501 return true;
1502 } else {
1503 return false;
1504 }
1505 }
1506
1507 inline bool AppendFromSelf(size_t offset, size_t len) {
1508 char* const op_end = op_ptr_ + len;
1509 // See SnappyArrayWriter::AppendFromSelf for an explanation of
1510 // the "offset - 1u" trick.
1511 if (SNAPPY_PREDICT_TRUE(offset - 1u < (size_t)(op_ptr_ - op_base_) &&
1512 op_end <= op_limit_)) {
1513 // Fast path: src and dst in current block.
1514 op_ptr_ = IncrementalCopy(op_ptr_ - offset, op_ptr_, op_end, op_limit_);
1515 return true;
1516 }
1517 return SlowAppendFromSelf(offset, len);
1518 }
1519
  // Called at the end of decompression. We ask the allocator to write all
  // blocks to the sink.
1522 inline void Flush() { allocator_.Flush(Produced()); }
1523};
1524
1525template<typename Allocator>
1526bool SnappyScatteredWriter<Allocator>::SlowAppend(const char* ip, size_t len) {
1527 size_t avail = op_limit_ - op_ptr_;
1528 while (len > avail) {
1529 // Completely fill this block
1530 memcpy(op_ptr_, ip, avail);
1531 op_ptr_ += avail;
1532 assert(op_limit_ - op_ptr_ == 0);
1533 full_size_ += (op_ptr_ - op_base_);
1534 len -= avail;
1535 ip += avail;
1536
1537 // Bounds check
1538 if (full_size_ + len > expected_) {
1539 return false;
1540 }
1541
1542 // Make new block
1543 size_t bsize = std::min<size_t>(kBlockSize, expected_ - full_size_);
1544 op_base_ = allocator_.Allocate(bsize);
1545 op_ptr_ = op_base_;
1546 op_limit_ = op_base_ + bsize;
1547 blocks_.push_back(op_base_);
1548 avail = bsize;
1549 }
1550
1551 memcpy(op_ptr_, ip, len);
1552 op_ptr_ += len;
1553 return true;
1554}
1555
1556template<typename Allocator>
1557bool SnappyScatteredWriter<Allocator>::SlowAppendFromSelf(size_t offset,
1558 size_t len) {
1559 // Overflow check
1560 // See SnappyArrayWriter::AppendFromSelf for an explanation of
1561 // the "offset - 1u" trick.
1562 const size_t cur = Size();
1563 if (offset - 1u >= cur) return false;
1564 if (expected_ - cur < len) return false;
1565
1566 // Currently we shouldn't ever hit this path because Compress() chops the
1567 // input into blocks and does not create cross-block copies. However, it is
1568 // nice if we do not rely on that, since we can get better compression if we
1569 // allow cross-block copies and thus might want to change the compressor in
1570 // the future.
1571 size_t src = cur - offset;
1572 while (len-- > 0) {
1573 char c = blocks_[src >> kBlockLog][src & (kBlockSize-1)];
1574 Append(&c, 1);
1575 src++;
1576 }
1577 return true;
1578}
1579
1580class SnappySinkAllocator {
1581 public:
1582 explicit SnappySinkAllocator(Sink* dest): dest_(dest) {}
1583 ~SnappySinkAllocator() {}
1584
1585 char* Allocate(int size) {
1586 Datablock block(new char[size], size);
1587 blocks_.push_back(block);
1588 return block.data;
1589 }
1590
1591 // We flush only at the end, because the writer wants
1592 // random access to the blocks and once we hand the
1593 // block over to the sink, we can't access it anymore.
1594 // Also we don't write more than has been actually written
1595 // to the blocks.
1596 void Flush(size_t size) {
1597 size_t size_written = 0;
1598 size_t block_size;
1599 for (size_t i = 0; i < blocks_.size(); ++i) {
1600 block_size = std::min<size_t>(blocks_[i].size, size - size_written);
1601 dest_->AppendAndTakeOwnership(blocks_[i].data, block_size,
1602 &SnappySinkAllocator::Deleter, NULL);
1603 size_written += block_size;
1604 }
1605 blocks_.clear();
1606 }
1607
1608 private:
1609 struct Datablock {
1610 char* data;
1611 size_t size;
1612 Datablock(char* p, size_t s) : data(p), size(s) {}
1613 };
1614
1615 static void Deleter(void* arg, const char* bytes, size_t size) {
1616 delete[] bytes;
1617 }
1618
1619 Sink* dest_;
1620 std::vector<Datablock> blocks_;
1621
1622 // Note: copying this object is allowed
1623};
1624
1625size_t UncompressAsMuchAsPossible(Source* compressed, Sink* uncompressed) {
1626 SnappySinkAllocator allocator(uncompressed);
1627 SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
1628 InternalUncompress(compressed, &writer);
1629 return writer.Produced();
1630}
1631
1632bool Uncompress(Source* compressed, Sink* uncompressed) {
1633 // Read the uncompressed length from the front of the compressed input
1634 SnappyDecompressor decompressor(compressed);
1635 uint32 uncompressed_len = 0;
1636 if (!decompressor.ReadUncompressedLength(&uncompressed_len)) {
1637 return false;
1638 }
1639
1640 char c;
1641 size_t allocated_size;
1642 char* buf = uncompressed->GetAppendBufferVariable(
1643 1, uncompressed_len, &c, 1, &allocated_size);
1644
1645 const size_t compressed_len = compressed->Available();
1646 // If we can get a flat buffer, then use it, otherwise do block by block
1647 // uncompression
1648 if (allocated_size >= uncompressed_len) {
1649 SnappyArrayWriter writer(buf);
1650 bool result = InternalUncompressAllTags(&decompressor, &writer,
1651 compressed_len, uncompressed_len);
1652 uncompressed->Append(buf, writer.Produced());
1653 return result;
1654 } else {
1655 SnappySinkAllocator allocator(uncompressed);
1656 SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
1657 return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
1658 uncompressed_len);
1659 }
1660}
1661
1662} // namespace snappy
1663