/* crc32.c -- compute the CRC-32 of a data stream
 * Copyright (C) 1995-2006, 2010, 2011, 2012, 2016, 2018 Mark Adler
 * For conditions of distribution and use, see copyright notice in zlib.h
 *
 * Thanks to Rodney Brown <rbrown64@csc.com.au> for his contribution of faster
 * CRC methods: exclusive-oring 32 bits of data at a time, and pre-computing
 * tables for updating the shift register in one step with three exclusive-ors
 * instead of four steps with four exclusive-ors. This results in about a
 * factor of two increase in speed on a Power PC G4 (PPC7455) using gcc -O3.
 */
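
/* Illustrative sketch (not part of the build): with a single 256-entry table,
 * each byte is folded in separately, four steps and four exclusive-ors per
 * 32 bits of input:
 *
 *     crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);    (repeated four times)
 *
 * With four pre-computed tables, 32 bits are folded in at once with a single
 * exclusive-or of the data and three exclusive-ors combining four table
 * lookups, as the DOLIT4/DOBIG4 macros below do (load32 and t0..t3 are
 * placeholders here):
 *
 *     crc ^= load32(buf);  buf += 4;
 *     crc = t3[crc & 0xff] ^ t2[(crc >> 8) & 0xff] ^
 *           t1[(crc >> 16) & 0xff] ^ t0[crc >> 24];
 */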

/* @(#) $Id$ */

#include "zbuild.h"
#include "zendian.h"
#include <inttypes.h>
#include "deflate.h"
#include "functable.h"
#include "crc32_p.h"
#include "crc32.h"

/* Local functions for crc concatenation */
static uint32_t crc32_combine_(uint32_t crc1, uint32_t crc2, z_off64_t len2);
static void crc32_combine_gen_(uint32_t *op, z_off64_t len2);

/* =========================================================================
 * This function can be used by asm versions of crc32()
 */
const uint32_t * ZEXPORT PREFIX(get_crc_table)(void) {
    return (const uint32_t *)crc_table;
}
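
/* Usage sketch (illustrative only): the first 256 entries of the returned
 * table form the standard byte-at-a-time CRC-32 table, so external or
 * assembly code can update a running CRC the same way DO1 below does
 * (buf and len are the caller's data pointer and length):
 *
 *     const uint32_t *table = PREFIX(get_crc_table)();
 *     uint32_t crc = 0xffffffff;
 *     while (len--)
 *         crc = table[(crc ^ *buf++) & 0xff] ^ (crc >> 8);
 *     crc ^= 0xffffffff;
 */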

uint32_t ZEXPORT PREFIX(crc32_z)(uint32_t crc, const unsigned char *buf, size_t len) {
    if (buf == NULL) return 0;

    return functable.crc32(crc, buf, len);
}

/* ========================================================================= */
#define DO1 crc = crc_table[0][((int)crc ^ (*buf++)) & 0xff] ^ (crc >> 8)
#define DO8 DO1; DO1; DO1; DO1; DO1; DO1; DO1; DO1
#define DO4 DO1; DO1; DO1; DO1
/* ========================================================================= */
/* Generic table-driven CRC-32: one byte per DO1 step, unrolled by four or eight. */
ZLIB_INTERNAL uint32_t crc32_generic(uint32_t crc, const unsigned char *buf, uint64_t len) {
    crc = crc ^ 0xffffffff;

#ifdef UNROLL_MORE
    while (len >= 8) {
        DO8;
        len -= 8;
    }
#else
    while (len >= 4) {
        DO4;
        len -= 4;
    }
#endif

    if (len) do {
        DO1;
    } while (--len);
    return crc ^ 0xffffffff;
}

#ifdef ZLIB_COMPAT
unsigned long ZEXPORT PREFIX(crc32)(unsigned long crc, const unsigned char *buf, unsigned int len) {
    return (unsigned long) PREFIX(crc32_z)((uint32_t) crc, buf, len);
}
#else
uint32_t ZEXPORT PREFIX(crc32)(uint32_t crc, const unsigned char *buf, uint32_t len) {
    return PREFIX(crc32_z)(crc, buf, len);
}
#endif
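
/* Usage example (illustrative, following the pattern documented in zlib.h;
 * read_buffer() is a placeholder for the caller's own input routine):
 *
 *     uint32_t crc = PREFIX(crc32)(0, NULL, 0);
 *     while ((len = read_buffer(buffer, sizeof(buffer))) > 0)
 *         crc = PREFIX(crc32)(crc, buffer, len);
 *     if (crc != original_crc) error();
 */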

/*
  The crc32_little() and crc32_big() functions below access the passed
  unsigned char * buffer with a 32-bit integer pointer type. This violates
  the strict aliasing rule, where a compiler can assume, for optimization
  purposes, that two pointers to fundamentally different types won't ever
  point to the same memory. This can manifest as a problem only if one of
  the pointers is written to. This code only reads from those pointers. So
  long as this code remains isolated in this compilation unit, there won't
  be a problem. For this reason, this code should not be copied and pasted
  into a compilation unit in which other code writes to the buffer that is
  passed to these routines.
 */
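
/* For reference, a strictly conforming alternative (not used here) would load
 * each 32-bit word through memcpy, which is well defined for any alignment and
 * sidesteps the aliasing question; whether it compiles to a single load
 * depends on the compiler:
 *
 *     uint32_t word;
 *     memcpy(&word, buf, sizeof(word));
 *     c ^= word;
 *     buf += 4;
 */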

/* ========================================================================= */
#if BYTE_ORDER == LITTLE_ENDIAN
#define DOLIT4 c ^= *buf4++; \
        c = crc_table[3][c & 0xff] ^ crc_table[2][(c >> 8) & 0xff] ^ \
            crc_table[1][(c >> 16) & 0xff] ^ crc_table[0][c >> 24]
#define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4

/* ========================================================================= */
ZLIB_INTERNAL uint32_t crc32_little(uint32_t crc, const unsigned char *buf, uint64_t len) {
    register uint32_t c;
    register const uint32_t *buf4;

    c = crc;
    c = ~c;
    while (len && ((uintptr_t)buf & 3)) {
        c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
        len--;
    }

    buf4 = (const uint32_t *)(const void *)buf;

#ifdef UNROLL_MORE
    while (len >= 32) {
        DOLIT32;
        len -= 32;
    }
#endif

    while (len >= 4) {
        DOLIT4;
        len -= 4;
    }
    buf = (const unsigned char *)buf4;

    if (len) do {
        c = crc_table[0][(c ^ *buf++) & 0xff] ^ (c >> 8);
    } while (--len);
    c = ~c;
    return c;
}
#endif /* BYTE_ORDER == LITTLE_ENDIAN */

/* ========================================================================= */
#if BYTE_ORDER == BIG_ENDIAN
#define DOBIG4 c ^= *buf4++; \
        c = crc_table[4][c & 0xff] ^ crc_table[5][(c >> 8) & 0xff] ^ \
            crc_table[6][(c >> 16) & 0xff] ^ crc_table[7][c >> 24]
#define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4

/* ========================================================================= */
ZLIB_INTERNAL uint32_t crc32_big(uint32_t crc, const unsigned char *buf, uint64_t len) {
    register uint32_t c;
    register const uint32_t *buf4;

    c = ZSWAP32(crc);
    c = ~c;
    while (len && ((uintptr_t)buf & 3)) {
        c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
        len--;
    }

    buf4 = (const uint32_t *)(const void *)buf;

#ifdef UNROLL_MORE
    while (len >= 32) {
        DOBIG32;
        len -= 32;
    }
#endif

    while (len >= 4) {
        DOBIG4;
        len -= 4;
    }
    buf = (const unsigned char *)buf4;

    if (len) do {
        c = crc_table[4][(c >> 24) ^ *buf++] ^ (c << 8);
    } while (--len);
    c = ~c;
    return ZSWAP32(c);
}
#endif /* BYTE_ORDER == BIG_ENDIAN */

/* ========================================================================= */
static uint32_t crc32_combine_(uint32_t crc1, uint32_t crc2, z_off64_t len2) {
    int n;

    if (len2 > 0)
        /* operator for 2^n zeros repeats every GF2_DIM n values */
        for (n = 0; len2; n = (n + 1) % GF2_DIM, len2 >>= 1)
            if (len2 & 1)
                crc1 = gf2_matrix_times(crc_comb[n], crc1);
    return crc1 ^ crc2;
}

/* ========================================================================= */
uint32_t ZEXPORT PREFIX(crc32_combine)(uint32_t crc1, uint32_t crc2, z_off_t len2) {
    return crc32_combine_(crc1, crc2, len2);
}

uint32_t ZEXPORT PREFIX(crc32_combine64)(uint32_t crc1, uint32_t crc2, z_off64_t len2) {
    return crc32_combine_(crc1, crc2, len2);
}
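
/* Usage example (illustrative): if crc1 is the CRC-32 of a first block of data
 * and crc2 is the CRC-32 of a second block of len2 bytes, the CRC-32 of the
 * two blocks concatenated is:
 *
 *     uint32_t crc = PREFIX(crc32_combine)(crc1, crc2, len2);
 */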

#ifdef X86_PCLMULQDQ_CRC
#include "arch/x86/x86.h"
#include "arch/x86/crc_folding.h"

/* Reduce the PCLMULQDQ folding state to the final 32-bit CRC and store it in
   the stream's check value. */
ZLIB_INTERNAL void crc_finalize(deflate_state *const s) {
    if (x86_cpu_has_pclmulqdq)
        s->strm->adler = crc_fold_512to32(s);
}
#endif

/* Reset the stream's running CRC before new data is compressed, initializing
   the PCLMULQDQ folding state when that path is available. */
ZLIB_INTERNAL void crc_reset(deflate_state *const s) {
#ifdef X86_PCLMULQDQ_CRC
    if (x86_cpu_has_pclmulqdq) {
        crc_fold_init(s);
        return;
    }
#endif
    s->strm->adler = PREFIX(crc32)(0L, NULL, 0);
}

/* Copy size bytes of input to dst while updating the stream's running CRC,
   folding with PCLMULQDQ when available and falling back to memcpy plus
   crc32() otherwise. */
ZLIB_INTERNAL void copy_with_crc(PREFIX3(stream) *strm, unsigned char *dst, unsigned long size) {
#ifdef X86_PCLMULQDQ_CRC
    if (x86_cpu_has_pclmulqdq) {
        crc_fold_copy(strm->state, dst, strm->next_in, size);
        return;
    }
#endif
    memcpy(dst, strm->next_in, size);
    strm->adler = PREFIX(crc32)(strm->adler, dst, size);
}

/* ========================================================================= */
static void crc32_combine_gen_(uint32_t *op, z_off64_t len2) {
    uint32_t row;
    int j;
    unsigned i;

    /* if len2 is zero or negative, return the identity matrix */
    if (len2 <= 0) {
        row = 1;
        for (j = 0; j < GF2_DIM; j++) {
            op[j] = row;
            row <<= 1;
        }
        return;
    }

    /* at least one bit in len2 is set -- find it, and copy the operator
       corresponding to that position into op */
    i = 0;
    for (;;) {
        if (len2 & 1) {
            for (j = 0; j < GF2_DIM; j++)
                op[j] = crc_comb[i][j];
            break;
        }
        len2 >>= 1;
        i = (i + 1) % GF2_DIM;
    }

    /* for each remaining bit set in len2 (if any), multiply op by the operator
       corresponding to that position */
    for (;;) {
        len2 >>= 1;
        i = (i + 1) % GF2_DIM;
        if (len2 == 0)
            break;
        if (len2 & 1)
            for (j = 0; j < GF2_DIM; j++)
                op[j] = gf2_matrix_times(crc_comb[i], op[j]);
    }
}

/* ========================================================================= */
void ZEXPORT PREFIX(crc32_combine_gen)(uint32_t *op, z_off_t len2) {
    crc32_combine_gen_(op, len2);
}

void ZEXPORT PREFIX(crc32_combine_gen64)(uint32_t *op, z_off64_t len2) {
    crc32_combine_gen_(op, len2);
}

/* ========================================================================= */
uint32_t ZEXPORT PREFIX(crc32_combine_op)(uint32_t crc1, uint32_t crc2, const uint32_t *op) {
    return gf2_matrix_times(op, crc1) ^ crc2;
}
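
/* Usage example (illustrative): when CRCs of many blocks of the same length
 * need to be combined, the operator can be generated once and then applied
 * per block. BLOCK_LEN below is a placeholder for the caller's block size:
 *
 *     uint32_t op[GF2_DIM];
 *     PREFIX(crc32_combine_gen)(op, BLOCK_LEN);
 *     crc = PREFIX(crc32_combine_op)(crc, block_crc, op);
 */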