/* ====================================================================
 * Copyright (c) 2012 The OpenSSL Project. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com). This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com). */

#include <assert.h>
#include <string.h>

#include <openssl/digest.h>
#include <openssl/nid.h>
#include <openssl/sha.h>

#include "../internal.h"
#include "internal.h"
#include "../fipsmodule/cipher/internal.h"


// MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
// field. (SHA-384/512 have 128-bit length.)
#define MAX_HASH_BIT_COUNT_BYTES 16

// MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
// Currently SHA-384/512 has a 128-byte block size and that's the largest
// supported by TLS.
#define MAX_HASH_BLOCK_SIZE 128

int EVP_tls_cbc_remove_padding(crypto_word_t *out_padding_ok, size_t *out_len,
                               const uint8_t *in, size_t in_len,
                               size_t block_size, size_t mac_size) {
  const size_t overhead = 1 /* padding length byte */ + mac_size;

  // These lengths are all public so we can test them in non-constant time.
  if (overhead > in_len) {
    return 0;
  }

  size_t padding_length = in[in_len - 1];

  crypto_word_t good = constant_time_ge_w(in_len, overhead + padding_length);
  // The padding consists of a length byte at the end of the record and
  // then that many bytes of padding, all with the same value as the
  // length byte. Thus, with the length byte included, there are i+1
  // bytes of padding.
  //
  // We can't check just |padding_length+1| bytes because that leaks
  // decrypted information. Therefore we always have to check the maximum
  // amount of padding possible. (Again, the length of the record is
  // public information so we can use it.)
  size_t to_check = 256;  // maximum amount of padding, including length byte.
  if (to_check > in_len) {
    to_check = in_len;
  }

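  // For example, with |in_len| = 64, |mac_size| = 20 and a final byte of 3,
  // the padding is valid only if the last four bytes are all 0x03. The loop
  // below still reads |to_check| = min(256, 64) = 64 trailing bytes, but the
  // mask from constant_time_ge_8 limits which of them may clear bits in
  // |good|.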
  for (size_t i = 0; i < to_check; i++) {
    uint8_t mask = constant_time_ge_8(padding_length, i);
    uint8_t b = in[in_len - 1 - i];
    // The final |padding_length+1| bytes should all have the value
    // |padding_length|. Therefore the XOR should be zero.
    good &= ~(mask & (padding_length ^ b));
  }

  // If any of the final |padding_length+1| bytes had the wrong value,
  // one or more of the lower eight bits of |good| will be cleared.
  good = constant_time_eq_w(0xff, good & 0xff);

  // Always treat |padding_length| as zero on error. If, assuming block size of
  // 16, a padding of [<15 arbitrary bytes> 15] treated |padding_length| as 16
  // and returned -1, distinguishing good MAC and bad padding from bad MAC and
  // bad padding would give POODLE's padding oracle.
  padding_length = good & (padding_length + 1);
  *out_len = in_len - padding_length;
  *out_padding_ok = good;
  return 1;
}

void EVP_tls_cbc_copy_mac(uint8_t *out, size_t md_size, const uint8_t *in,
                          size_t in_len, size_t orig_len) {
  uint8_t rotated_mac1[EVP_MAX_MD_SIZE], rotated_mac2[EVP_MAX_MD_SIZE];
  uint8_t *rotated_mac = rotated_mac1;
  uint8_t *rotated_mac_tmp = rotated_mac2;

  // mac_end is the index of |in| just after the end of the MAC.
  size_t mac_end = in_len;
  size_t mac_start = mac_end - md_size;

  assert(orig_len >= in_len);
  assert(in_len >= md_size);
  assert(md_size <= EVP_MAX_MD_SIZE);

  // scan_start contains the number of bytes that we can ignore because
  // the MAC's position can only vary by 255 bytes.
  size_t scan_start = 0;
  // This information is public so it's safe to branch based on it.
  if (orig_len > md_size + 255 + 1) {
    scan_start = orig_len - (md_size + 255 + 1);
  }
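  // For example, with |orig_len| = 1024 and |md_size| = 20, the MAC cannot
  // begin before offset 1024 - (20 + 255 + 1) = 748, so the scan below can
  // publicly skip the first 748 bytes.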

  size_t rotate_offset = 0;
  uint8_t mac_started = 0;
  OPENSSL_memset(rotated_mac, 0, md_size);
  for (size_t i = scan_start, j = 0; i < orig_len; i++, j++) {
    if (j >= md_size) {
      j -= md_size;
    }
    crypto_word_t is_mac_start = constant_time_eq_w(i, mac_start);
    mac_started |= is_mac_start;
    uint8_t mac_ended = constant_time_ge_8(i, mac_end);
    rotated_mac[j] |= in[i] & mac_started & ~mac_ended;
    // Save the offset that |mac_start| is mapped to.
    rotate_offset |= j & is_mac_start;
  }
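  // |rotated_mac| now holds the |md_size| bytes of the MAC, but stored so
  // that MAC byte |n| sits at index (rotate_offset + n) % md_size. The loops
  // below undo that rotation without branching on |rotate_offset|.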

  // Now rotate the MAC. We rotate in log(md_size) steps, one for each bit
  // position.
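  // Indexing |rotated_mac| directly with |rotate_offset| (a secret value)
  // could leak it through cache timing, so each step touches every byte in a
  // fixed order and uses a mask to decide whether to rotate.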
  for (size_t offset = 1; offset < md_size; offset <<= 1, rotate_offset >>= 1) {
    // Rotate by |offset| iff the corresponding bit is set in
    // |rotate_offset|, placing the result in |rotated_mac_tmp|.
    const uint8_t skip_rotate = (rotate_offset & 1) - 1;
    for (size_t i = 0, j = offset; i < md_size; i++, j++) {
      if (j >= md_size) {
        j -= md_size;
      }
      rotated_mac_tmp[i] =
          constant_time_select_8(skip_rotate, rotated_mac[i], rotated_mac[j]);
    }

    // Swap pointers so |rotated_mac| contains the (possibly) rotated value.
    // Note the number of iterations and thus the identity of these pointers is
    // public information.
    uint8_t *tmp = rotated_mac;
    rotated_mac = rotated_mac_tmp;
    rotated_mac_tmp = tmp;
  }

  OPENSSL_memcpy(out, rotated_mac, md_size);
}

// u32toBE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
// big-endian order. The value of p is advanced by four.
#define u32toBE(n, p)                \
  do {                               \
    *((p)++) = (uint8_t)((n) >> 24); \
    *((p)++) = (uint8_t)((n) >> 16); \
    *((p)++) = (uint8_t)((n) >> 8);  \
    *((p)++) = (uint8_t)((n));       \
  } while (0)
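// For example, u32toBE(0x0a0b0c0d, p) writes the bytes 0a 0b 0c 0d and leaves
// p pointing just past them.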

// u64toBE serialises an unsigned, 64-bit number (n) as eight bytes at (p) in
// big-endian order. The value of p is advanced by eight.
#define u64toBE(n, p)                \
  do {                               \
    *((p)++) = (uint8_t)((n) >> 56); \
    *((p)++) = (uint8_t)((n) >> 48); \
    *((p)++) = (uint8_t)((n) >> 40); \
    *((p)++) = (uint8_t)((n) >> 32); \
    *((p)++) = (uint8_t)((n) >> 24); \
    *((p)++) = (uint8_t)((n) >> 16); \
    *((p)++) = (uint8_t)((n) >> 8);  \
    *((p)++) = (uint8_t)((n));       \
  } while (0)

typedef union {
  SHA_CTX sha1;
  SHA256_CTX sha256;
  SHA512_CTX sha512;
} HASH_CTX;

static void tls1_sha1_transform(HASH_CTX *ctx, const uint8_t *block) {
  SHA1_Transform(&ctx->sha1, block);
}

static void tls1_sha256_transform(HASH_CTX *ctx, const uint8_t *block) {
  SHA256_Transform(&ctx->sha256, block);
}

static void tls1_sha512_transform(HASH_CTX *ctx, const uint8_t *block) {
  SHA512_Transform(&ctx->sha512, block);
}

// These functions serialize the state of a hash and thus perform the standard
// "final" operation without adding the padding and length that such a function
// typically does.
static void tls1_sha1_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
  SHA_CTX *sha1 = &ctx->sha1;
  u32toBE(sha1->h[0], md_out);
  u32toBE(sha1->h[1], md_out);
  u32toBE(sha1->h[2], md_out);
  u32toBE(sha1->h[3], md_out);
  u32toBE(sha1->h[4], md_out);
}

static void tls1_sha256_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
  SHA256_CTX *sha256 = &ctx->sha256;
  for (unsigned i = 0; i < 8; i++) {
    u32toBE(sha256->h[i], md_out);
  }
}

static void tls1_sha512_final_raw(HASH_CTX *ctx, uint8_t *md_out) {
  SHA512_CTX *sha512 = &ctx->sha512;
  for (unsigned i = 0; i < 8; i++) {
    u64toBE(sha512->h[i], md_out);
  }
}

int EVP_tls_cbc_record_digest_supported(const EVP_MD *md) {
  switch (EVP_MD_type(md)) {
    case NID_sha1:
    case NID_sha256:
    case NID_sha384:
      return 1;

    default:
      return 0;
  }
}

int EVP_tls_cbc_digest_record(const EVP_MD *md, uint8_t *md_out,
                              size_t *md_out_size, const uint8_t header[13],
                              const uint8_t *data, size_t data_plus_mac_size,
                              size_t data_plus_mac_plus_padding_size,
                              const uint8_t *mac_secret,
                              unsigned mac_secret_length) {
  HASH_CTX md_state;
  void (*md_final_raw)(HASH_CTX *ctx, uint8_t *md_out);
  void (*md_transform)(HASH_CTX *ctx, const uint8_t *block);
  unsigned md_size, md_block_size = 64, md_block_shift = 6;
  // md_length_size is the number of bytes in the length field that terminates
  // the hash.
  unsigned md_length_size = 8;

  // Bound the acceptable input so we can forget about many possible overflows
  // later in this function. This is redundant with the record size limits in
  // TLS.
  if (data_plus_mac_plus_padding_size >= 1024 * 1024) {
    assert(0);
    return 0;
  }

  switch (EVP_MD_type(md)) {
    case NID_sha1:
      SHA1_Init(&md_state.sha1);
      md_final_raw = tls1_sha1_final_raw;
      md_transform = tls1_sha1_transform;
      md_size = SHA_DIGEST_LENGTH;
      break;

    case NID_sha256:
      SHA256_Init(&md_state.sha256);
      md_final_raw = tls1_sha256_final_raw;
      md_transform = tls1_sha256_transform;
      md_size = SHA256_DIGEST_LENGTH;
      break;

    case NID_sha384:
      SHA384_Init(&md_state.sha512);
      md_final_raw = tls1_sha512_final_raw;
      md_transform = tls1_sha512_transform;
      md_size = SHA384_DIGEST_LENGTH;
      md_block_size = 128;
      md_block_shift = 7;
      md_length_size = 16;
      break;

    default:
      // EVP_tls_cbc_record_digest_supported should have been called first to
      // check that the hash function is supported.
      assert(0);
      *md_out_size = 0;
      return 0;
  }

  assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
  assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
  assert(md_block_size == (1u << md_block_shift));
  assert(md_size <= EVP_MAX_MD_SIZE);

  static const size_t kHeaderLength = 13;

  // kVarianceBlocks is the number of blocks of the hash that we have to
  // calculate in constant time because they could be altered by the
  // padding value.
  //
  // TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
  // required to be minimal. Therefore we say that the final |kVarianceBlocks|
  // blocks can vary based on the padding and on the hash used. This value
  // must be derived from public information.
  const size_t kVarianceBlocks =
     ( 255 + 1 +          // maximum padding bytes + padding length
       md_size +          // length of hash's output
       md_block_size - 1  // ceiling
     ) / md_block_size
     + 1;  // the 0x80 marker and the encoded message length may or may not
           // require an extra block; since whether they do depends on the
           // message length, one extra block is always added so that the
           // computation runs in constant time.
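  // For SHA-1 this works out to (255 + 1 + 20 + 63) / 64 + 1 = 6 blocks, and
  // for SHA-384 to (255 + 1 + 48 + 127) / 128 + 1 = 4 blocks.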

  // From now on we're dealing with the MAC, which conceptually has 13
  // bytes of `header' before the start of the data.
  size_t len = data_plus_mac_plus_padding_size + kHeaderLength;
  // max_mac_bytes contains the maximum number of bytes that get MACed,
  // including |header|, assuming that there's no padding.
  size_t max_mac_bytes = len - md_size - 1;
  // num_blocks is the maximum number of hash blocks.
  size_t num_blocks =
      (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
  // In order to calculate the MAC in constant time we have to handle
  // the final blocks specially because the padding value could cause the
  // end to appear somewhere in the final |kVarianceBlocks| blocks and we
  // can't leak where. However, |num_starting_blocks| worth of data can
  // be hashed right away because no padding value can affect whether
  // they are plaintext.
  size_t num_starting_blocks = 0;
  // k is the starting byte offset into the conceptual header||data where
  // we start processing.
  size_t k = 0;
  // mac_end_offset is the index just past the end of the data to be MACed.
  size_t mac_end_offset = data_plus_mac_size + kHeaderLength - md_size;
  // c is the index of the 0x80 byte in the final hash block that contains
  // application data.
  size_t c = mac_end_offset & (md_block_size - 1);
  // index_a is the hash block number that contains the 0x80 terminating value.
  size_t index_a = mac_end_offset >> md_block_shift;
  // index_b is the hash block number that contains the encoded hash length
  // (the final |md_length_size| bytes, giving the length in bits).
  size_t index_b = (mac_end_offset + md_length_size) >> md_block_shift;
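  // For example, with a 64-byte block and |mac_end_offset| = 250, the data
  // ends at byte 58 of block 3 (c = 58, index_a = 3). The eight length bytes
  // no longer fit after the 0x80 byte in that block, so they spill into the
  // next one: index_b = (250 + 8) >> 6 = 4.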

  if (num_blocks > kVarianceBlocks) {
    num_starting_blocks = num_blocks - kVarianceBlocks;
    k = md_block_size * num_starting_blocks;
  }

  // bits is the hash-length in bits. It includes the additional hash
  // block for the masked HMAC key.
  size_t bits = 8 * mac_end_offset;  // at most 18 bits to represent

  // Compute the initial HMAC block.
  bits += 8 * md_block_size;
  // hmac_pad is the masked HMAC key.
  uint8_t hmac_pad[MAX_HASH_BLOCK_SIZE];
  OPENSSL_memset(hmac_pad, 0, md_block_size);
  assert(mac_secret_length <= sizeof(hmac_pad));
  OPENSSL_memcpy(hmac_pad, mac_secret, mac_secret_length);
  for (size_t i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x36;
  }

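  // |hmac_pad| now holds the MAC key XORed with the HMAC ipad byte 0x36, so
  // processing it as the first block starts the inner hash
  // H(key ^ ipad || header || data).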
  md_transform(&md_state, hmac_pad);

  // The length check means |bits| fits in four bytes.
  uint8_t length_bytes[MAX_HASH_BIT_COUNT_BYTES];
  OPENSSL_memset(length_bytes, 0, md_length_size - 4);
  length_bytes[md_length_size - 4] = (uint8_t)(bits >> 24);
  length_bytes[md_length_size - 3] = (uint8_t)(bits >> 16);
  length_bytes[md_length_size - 2] = (uint8_t)(bits >> 8);
  length_bytes[md_length_size - 1] = (uint8_t)bits;

  if (k > 0) {
    // k is a multiple of md_block_size.
    uint8_t first_block[MAX_HASH_BLOCK_SIZE];
    OPENSSL_memcpy(first_block, header, 13);
    OPENSSL_memcpy(first_block + 13, data, md_block_size - 13);
    md_transform(&md_state, first_block);
    for (size_t i = 1; i < k / md_block_size; i++) {
      md_transform(&md_state, data + md_block_size * i - 13);
    }
  }

  uint8_t mac_out[EVP_MAX_MD_SIZE];
  OPENSSL_memset(mac_out, 0, sizeof(mac_out));

  // We now process the final hash blocks. For each block, we construct
  // it in constant time. If |i == index_a| then we'll include the 0x80
  // terminator and zero padding. For each block we selectively copy it, in
  // constant time, to |mac_out|.
  for (size_t i = num_starting_blocks;
       i <= num_starting_blocks + kVarianceBlocks; i++) {
    uint8_t block[MAX_HASH_BLOCK_SIZE];
    uint8_t is_block_a = constant_time_eq_8(i, index_a);
    uint8_t is_block_b = constant_time_eq_8(i, index_b);
    for (size_t j = 0; j < md_block_size; j++) {
      uint8_t b = 0;
      if (k < kHeaderLength) {
        b = header[k];
      } else if (k < data_plus_mac_plus_padding_size + kHeaderLength) {
        b = data[k - kHeaderLength];
      }
      k++;

      uint8_t is_past_c = is_block_a & constant_time_ge_8(j, c);
      uint8_t is_past_cp1 = is_block_a & constant_time_ge_8(j, c + 1);
      // If this is the block containing the end of the
      // application data, and we are at the offset for the
      // 0x80 value, then overwrite b with 0x80.
      b = constant_time_select_8(is_past_c, 0x80, b);
      // If this is the block containing the end of the
      // application data and we're past the 0x80 value then
      // just write zero.
      b = b & ~is_past_cp1;
      // If this is index_b (the final block), but not
      // index_a (the end of the data), then the 64-bit
      // length didn't fit into index_a and we're having to
      // add an extra block of zeros.
      b &= ~is_block_b | is_block_a;

      // The final bytes of one of the blocks contain the
      // length.
      if (j >= md_block_size - md_length_size) {
        // If this is index_b, write a length byte.
        b = constant_time_select_8(
            is_block_b, length_bytes[j - (md_block_size - md_length_size)], b);
      }
      block[j] = b;
    }

    md_transform(&md_state, block);
    md_final_raw(&md_state, block);
    // If this is index_b, copy the hash value to |mac_out|.
    for (size_t j = 0; j < md_size; j++) {
      mac_out[j] |= block[j] & is_block_b;
    }
  }

  EVP_MD_CTX md_ctx;
  EVP_MD_CTX_init(&md_ctx);
  if (!EVP_DigestInit_ex(&md_ctx, md, NULL /* engine */)) {
    EVP_MD_CTX_cleanup(&md_ctx);
    return 0;
  }

  // Complete the HMAC in the standard manner.
  for (size_t i = 0; i < md_block_size; i++) {
    hmac_pad[i] ^= 0x6a;
  }
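  // 0x36 ^ 0x6a = 0x5c, so |hmac_pad| now holds the key XORed with the HMAC
  // opad byte. The outer digest below computes H(key ^ opad || inner_hash),
  // completing the standard HMAC construction.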

  EVP_DigestUpdate(&md_ctx, hmac_pad, md_block_size);
  EVP_DigestUpdate(&md_ctx, mac_out, md_size);
  unsigned md_out_size_u;
  EVP_DigestFinal(&md_ctx, md_out, &md_out_size_u);
  *md_out_size = md_out_size_u;
  EVP_MD_CTX_cleanup(&md_ctx);

  return 1;
}
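
// The sketch below is only an illustration of how a CBC decryption path might
// combine the three functions in this file. The variable names and the
// surrounding record handling (header construction, length bookkeeping, the
// final acceptance policy) are hypothetical and not part of this file's API.
//
//   crypto_word_t padding_ok;
//   size_t data_plus_mac_len;
//   if (!EVP_tls_cbc_remove_padding(&padding_ok, &data_plus_mac_len, plaintext,
//                                   plaintext_len, block_size, mac_len)) {
//     return 0;  // publicly invalid record
//   }
//   uint8_t computed_mac[EVP_MAX_MD_SIZE], record_mac[EVP_MAX_MD_SIZE];
//   size_t computed_mac_len;
//   if (!EVP_tls_cbc_digest_record(md, computed_mac, &computed_mac_len, header,
//                                  plaintext, data_plus_mac_len, plaintext_len,
//                                  mac_secret, mac_secret_len)) {
//     return 0;
//   }
//   EVP_tls_cbc_copy_mac(record_mac, mac_len, plaintext, data_plus_mac_len,
//                        plaintext_len);
//   // Fold the padding check and the MAC comparison into a single
//   // secret-independent result.
//   crypto_word_t good =
//       padding_ok &
//       constant_time_eq_int(CRYPTO_memcmp(computed_mac, record_mac, mac_len),
//                            0);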