1 | /* |
2 | * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"). |
5 | * You may not use this file except in compliance with the License. |
6 | * A copy of the License is located at |
7 | * |
8 | * http://aws.amazon.com/apache2.0 |
9 | * |
10 | * or in the "license" file accompanying this file. This file is distributed |
11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either |
12 | * express or implied. See the License for the specific language governing |
13 | * permissions and limitations under the License. |
14 | */ |
15 | |
16 | #include <aws/common/byte_buf.h> |
17 | #include <aws/common/private/byte_buf.h> |
18 | |
19 | #include <stdarg.h> |
20 | |
21 | #ifdef _MSC_VER |
/* disable warnings about non-constant aggregate initializers (C4204) and assignment within a conditional expression (C4706) on Microsoft compilers */
23 | # pragma warning(disable : 4204) |
24 | # pragma warning(disable : 4706) |
25 | #endif |
26 | |
27 | int aws_byte_buf_init(struct aws_byte_buf *buf, struct aws_allocator *allocator, size_t capacity) { |
28 | AWS_PRECONDITION(buf); |
29 | AWS_PRECONDITION(allocator); |
30 | |
31 | buf->buffer = (capacity == 0) ? NULL : aws_mem_acquire(allocator, capacity); |
32 | if (capacity != 0 && buf->buffer == NULL) { |
33 | return AWS_OP_ERR; |
34 | } |
35 | |
36 | buf->len = 0; |
37 | buf->capacity = capacity; |
38 | buf->allocator = allocator; |
39 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
40 | return AWS_OP_SUCCESS; |
41 | } |
42 | |
43 | int aws_byte_buf_init_copy(struct aws_byte_buf *dest, struct aws_allocator *allocator, const struct aws_byte_buf *src) { |
44 | AWS_PRECONDITION(allocator); |
45 | AWS_PRECONDITION(dest); |
46 | AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(src)); |
47 | |
48 | if (!src->buffer) { |
49 | AWS_ZERO_STRUCT(*dest); |
50 | dest->allocator = allocator; |
51 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
52 | return AWS_OP_SUCCESS; |
53 | } |
54 | |
55 | *dest = *src; |
56 | dest->allocator = allocator; |
57 | dest->buffer = (uint8_t *)aws_mem_acquire(allocator, src->capacity); |
58 | if (dest->buffer == NULL) { |
59 | AWS_ZERO_STRUCT(*dest); |
60 | return AWS_OP_ERR; |
61 | } |
62 | memcpy(dest->buffer, src->buffer, src->len); |
63 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
64 | return AWS_OP_SUCCESS; |
65 | } |
66 | |
67 | bool aws_byte_buf_is_valid(const struct aws_byte_buf *const buf) { |
68 | return buf && ((buf->capacity == 0 && buf->len == 0 && buf->buffer == NULL) || |
69 | (buf->capacity > 0 && buf->len <= buf->capacity && AWS_MEM_IS_WRITABLE(buf->buffer, buf->len))); |
70 | } |
71 | |
72 | bool aws_byte_cursor_is_valid(const struct aws_byte_cursor *cursor) { |
73 | return cursor && |
74 | ((cursor->len == 0) || (cursor->len > 0 && cursor->ptr && AWS_MEM_IS_READABLE(cursor->ptr, cursor->len))); |
75 | } |
76 | |
77 | void aws_byte_buf_reset(struct aws_byte_buf *buf, bool zero_contents) { |
78 | if (zero_contents) { |
79 | aws_byte_buf_secure_zero(buf); |
80 | } |
81 | buf->len = 0; |
82 | } |
83 | |
84 | void aws_byte_buf_clean_up(struct aws_byte_buf *buf) { |
85 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
86 | if (buf->allocator && buf->buffer) { |
87 | aws_mem_release(buf->allocator, (void *)buf->buffer); |
88 | } |
89 | buf->allocator = NULL; |
90 | buf->buffer = NULL; |
91 | buf->len = 0; |
92 | buf->capacity = 0; |
93 | } |
94 | |
95 | void aws_byte_buf_secure_zero(struct aws_byte_buf *buf) { |
96 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
97 | if (buf->buffer) { |
98 | aws_secure_zero(buf->buffer, buf->capacity); |
99 | } |
100 | buf->len = 0; |
101 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
102 | } |
103 | |
104 | void aws_byte_buf_clean_up_secure(struct aws_byte_buf *buf) { |
105 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
106 | aws_byte_buf_secure_zero(buf); |
107 | aws_byte_buf_clean_up(buf); |
108 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
109 | } |
110 | |
111 | bool aws_byte_buf_eq(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) { |
112 | AWS_PRECONDITION(aws_byte_buf_is_valid(a)); |
113 | AWS_PRECONDITION(aws_byte_buf_is_valid(b)); |
114 | bool rval = aws_array_eq(a->buffer, a->len, b->buffer, b->len); |
115 | AWS_POSTCONDITION(aws_byte_buf_is_valid(a)); |
116 | AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); |
117 | return rval; |
118 | } |
119 | |
120 | bool aws_byte_buf_eq_ignore_case(const struct aws_byte_buf *const a, const struct aws_byte_buf *const b) { |
121 | AWS_PRECONDITION(aws_byte_buf_is_valid(a)); |
122 | AWS_PRECONDITION(aws_byte_buf_is_valid(b)); |
123 | bool rval = aws_array_eq_ignore_case(a->buffer, a->len, b->buffer, b->len); |
124 | AWS_POSTCONDITION(aws_byte_buf_is_valid(a)); |
125 | AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); |
126 | return rval; |
127 | } |
128 | |
129 | bool aws_byte_buf_eq_c_str(const struct aws_byte_buf *const buf, const char *const c_str) { |
130 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
131 | AWS_PRECONDITION(c_str != NULL); |
132 | bool rval = aws_array_eq_c_str(buf->buffer, buf->len, c_str); |
133 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
134 | return rval; |
135 | } |
136 | |
137 | bool aws_byte_buf_eq_c_str_ignore_case(const struct aws_byte_buf *const buf, const char *const c_str) { |
138 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
139 | AWS_PRECONDITION(c_str != NULL); |
140 | bool rval = aws_array_eq_c_str_ignore_case(buf->buffer, buf->len, c_str); |
141 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
142 | return rval; |
143 | } |
144 | |
145 | int aws_byte_buf_init_copy_from_cursor( |
146 | struct aws_byte_buf *dest, |
147 | struct aws_allocator *allocator, |
148 | struct aws_byte_cursor src) { |
149 | AWS_PRECONDITION(allocator); |
150 | AWS_PRECONDITION(dest); |
151 | AWS_ERROR_PRECONDITION(aws_byte_cursor_is_valid(&src)); |
152 | |
153 | AWS_ZERO_STRUCT(*dest); |
154 | |
155 | dest->buffer = (src.len > 0) ? (uint8_t *)aws_mem_acquire(allocator, src.len) : NULL; |
156 | if (src.len != 0 && dest->buffer == NULL) { |
157 | return AWS_OP_ERR; |
158 | } |
159 | |
160 | dest->len = src.len; |
161 | dest->capacity = src.len; |
162 | dest->allocator = allocator; |
163 | if (src.len > 0) { |
164 | memcpy(dest->buffer, src.ptr, src.len); |
165 | } |
166 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
167 | return AWS_OP_SUCCESS; |
168 | } |
169 | |
170 | bool aws_byte_cursor_next_split( |
171 | const struct aws_byte_cursor *AWS_RESTRICT input_str, |
172 | char split_on, |
173 | struct aws_byte_cursor *AWS_RESTRICT substr) { |
174 | |
175 | bool first_run = false; |
176 | if (!substr->ptr) { |
177 | first_run = true; |
178 | substr->ptr = input_str->ptr; |
179 | substr->len = 0; |
180 | } |
181 | |
182 | if (substr->ptr > input_str->ptr + input_str->len) { |
        /* This will hit if the last substring returned was an empty string after a terminating split_on. */
184 | AWS_ZERO_STRUCT(*substr); |
185 | return false; |
186 | } |
187 | |
188 | /* Calculate first byte to search. */ |
189 | substr->ptr += substr->len; |
190 | /* Remaining bytes is the number we started with minus the number of bytes already read. */ |
191 | substr->len = input_str->len - (substr->ptr - input_str->ptr); |
192 | |
193 | if (!first_run && substr->len == 0) { |
194 | /* This will hit if the string doesn't end with split_on but we're done. */ |
195 | AWS_ZERO_STRUCT(*substr); |
196 | return false; |
197 | } |
198 | |
199 | if (!first_run && *substr->ptr == split_on) { |
        /* If not the first run, substr->ptr points at the delimiter that ended the previous substring; skip it. */
201 | ++substr->ptr; |
202 | --substr->len; |
203 | |
204 | if (substr->len == 0) { |
205 | /* If split character was last in the string, return empty substr. */ |
206 | return true; |
207 | } |
208 | } |
209 | |
210 | uint8_t *new_location = memchr(substr->ptr, split_on, substr->len); |
211 | if (new_location) { |
212 | |
213 | /* Character found, update string length. */ |
214 | substr->len = new_location - substr->ptr; |
215 | } |
216 | |
217 | return true; |
218 | } |
219 | |
220 | int aws_byte_cursor_split_on_char_n( |
221 | const struct aws_byte_cursor *AWS_RESTRICT input_str, |
222 | char split_on, |
223 | size_t n, |
224 | struct aws_array_list *AWS_RESTRICT output) { |
225 | AWS_ASSERT(input_str && input_str->ptr); |
226 | AWS_ASSERT(output); |
227 | AWS_ASSERT(output->item_size >= sizeof(struct aws_byte_cursor)); |
228 | |
229 | size_t max_splits = n > 0 ? n : SIZE_MAX; |
230 | size_t split_count = 0; |
231 | |
232 | struct aws_byte_cursor substr; |
233 | AWS_ZERO_STRUCT(substr); |
234 | |
235 | /* Until we run out of substrs or hit the max split count, keep iterating and pushing into the array list. */ |
236 | while (split_count <= max_splits && aws_byte_cursor_next_split(input_str, split_on, &substr)) { |
237 | |
238 | if (split_count == max_splits) { |
239 | /* If this is the last split, take the rest of the string. */ |
240 | substr.len = input_str->len - (substr.ptr - input_str->ptr); |
241 | } |
242 | |
243 | if (AWS_UNLIKELY(aws_array_list_push_back(output, (const void *)&substr))) { |
244 | return AWS_OP_ERR; |
245 | } |
246 | ++split_count; |
247 | } |
248 | |
249 | return AWS_OP_SUCCESS; |
250 | } |
251 | |
252 | int aws_byte_cursor_split_on_char( |
253 | const struct aws_byte_cursor *AWS_RESTRICT input_str, |
254 | char split_on, |
255 | struct aws_array_list *AWS_RESTRICT output) { |
256 | |
257 | return aws_byte_cursor_split_on_char_n(input_str, split_on, 0, output); |
258 | } |
259 | |
260 | int aws_byte_cursor_find_exact( |
261 | const struct aws_byte_cursor *AWS_RESTRICT input_str, |
262 | const struct aws_byte_cursor *AWS_RESTRICT to_find, |
263 | struct aws_byte_cursor *first_find) { |
264 | if (to_find->len > input_str->len) { |
265 | return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); |
266 | } |
267 | |
268 | if (to_find->len < 1) { |
269 | return aws_raise_error(AWS_ERROR_SHORT_BUFFER); |
270 | } |
271 | |
272 | struct aws_byte_cursor working_cur = *input_str; |
273 | |
274 | while (working_cur.len) { |
275 | uint8_t *first_char_location = memchr(working_cur.ptr, (char)*to_find->ptr, working_cur.len); |
276 | |
277 | if (!first_char_location) { |
278 | return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); |
279 | } |
280 | |
281 | aws_byte_cursor_advance(&working_cur, first_char_location - working_cur.ptr); |
282 | |
283 | if (working_cur.len < to_find->len) { |
284 | return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); |
285 | } |
286 | |
287 | if (!memcmp(working_cur.ptr, to_find->ptr, to_find->len)) { |
288 | *first_find = working_cur; |
289 | return AWS_OP_SUCCESS; |
290 | } |
291 | |
292 | aws_byte_cursor_advance(&working_cur, 1); |
293 | } |
294 | |
295 | return aws_raise_error(AWS_ERROR_STRING_MATCH_NOT_FOUND); |
296 | } |
297 | |
298 | int aws_byte_buf_cat(struct aws_byte_buf *dest, size_t number_of_args, ...) { |
299 | AWS_PRECONDITION(aws_byte_buf_is_valid(dest)); |
300 | |
301 | va_list ap; |
302 | va_start(ap, number_of_args); |
303 | |
304 | for (size_t i = 0; i < number_of_args; ++i) { |
305 | struct aws_byte_buf *buffer = va_arg(ap, struct aws_byte_buf *); |
306 | struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(buffer); |
307 | |
308 | if (aws_byte_buf_append(dest, &cursor)) { |
309 | va_end(ap); |
310 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
311 | return AWS_OP_ERR; |
312 | } |
313 | } |
314 | |
315 | va_end(ap); |
316 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
317 | return AWS_OP_SUCCESS; |
318 | } |
319 | |
320 | bool aws_byte_cursor_eq(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b) { |
321 | AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); |
322 | AWS_PRECONDITION(aws_byte_cursor_is_valid(b)); |
323 | bool rv = aws_array_eq(a->ptr, a->len, b->ptr, b->len); |
324 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); |
325 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(b)); |
326 | return rv; |
327 | } |
328 | |
329 | bool aws_byte_cursor_eq_ignore_case(const struct aws_byte_cursor *a, const struct aws_byte_cursor *b) { |
330 | AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); |
331 | AWS_PRECONDITION(aws_byte_cursor_is_valid(b)); |
332 | bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->ptr, b->len); |
333 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); |
334 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(b)); |
335 | return rv; |
336 | } |
337 | |
338 | /* Every possible uint8_t value, lowercased */ |
339 | static const uint8_t s_tolower_table[256] = { |
340 | 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, |
341 | 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, |
342 | 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 'a', |
343 | 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', |
344 | 'x', 'y', 'z', 91, 92, 93, 94, 95, 96, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', |
345 | 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 123, 124, 125, 126, 127, 128, 129, 130, 131, |
346 | 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, |
347 | 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, |
348 | 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, |
349 | 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, |
350 | 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, |
351 | 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255}; |
352 | |
353 | const uint8_t *aws_lookup_table_to_lower_get(void) { |
354 | return s_tolower_table; |
355 | } |
356 | |
357 | bool aws_array_eq_ignore_case( |
358 | const void *const array_a, |
359 | const size_t len_a, |
360 | const void *const array_b, |
361 | const size_t len_b) { |
    AWS_PRECONDITION(
        (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a].");
    AWS_PRECONDITION(
        (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b].");
366 | |
367 | if (len_a != len_b) { |
368 | return false; |
369 | } |
370 | |
371 | const uint8_t *bytes_a = array_a; |
372 | const uint8_t *bytes_b = array_b; |
373 | for (size_t i = 0; i < len_a; ++i) { |
374 | if (s_tolower_table[bytes_a[i]] != s_tolower_table[bytes_b[i]]) { |
375 | return false; |
376 | } |
377 | } |
378 | |
379 | return true; |
380 | } |
381 | |
382 | bool aws_array_eq(const void *const array_a, const size_t len_a, const void *const array_b, const size_t len_b) { |
    AWS_PRECONDITION(
        (len_a == 0) || AWS_MEM_IS_READABLE(array_a, len_a), "Input array [array_a] must be readable up to [len_a].");
    AWS_PRECONDITION(
        (len_b == 0) || AWS_MEM_IS_READABLE(array_b, len_b), "Input array [array_b] must be readable up to [len_b].");
387 | |
388 | if (len_a != len_b) { |
389 | return false; |
390 | } |
391 | |
392 | if (len_a == 0) { |
393 | return true; |
394 | } |
395 | |
396 | return !memcmp(array_a, array_b, len_a); |
397 | } |
398 | |
399 | bool aws_array_eq_c_str_ignore_case(const void *const array, const size_t array_len, const char *const c_str) { |
    AWS_PRECONDITION(
        array || (array_len == 0),
        "Input pointer [array] must be non-NULL unless input [array_len] is zero.");
403 | AWS_PRECONDITION(c_str != NULL); |
404 | |
405 | /* Simpler implementation could have been: |
406 | * return aws_array_eq_ignore_case(array, array_len, c_str, strlen(c_str)); |
407 | * but that would have traversed c_str twice. |
408 | * This implementation traverses c_str just once. */ |
409 | |
410 | const uint8_t *array_bytes = array; |
411 | const uint8_t *str_bytes = (const uint8_t *)c_str; |
412 | |
413 | for (size_t i = 0; i < array_len; ++i) { |
414 | uint8_t s = str_bytes[i]; |
415 | if (s == '\0') { |
416 | return false; |
417 | } |
418 | |
419 | if (s_tolower_table[array_bytes[i]] != s_tolower_table[s]) { |
420 | return false; |
421 | } |
422 | } |
423 | |
424 | return str_bytes[array_len] == '\0'; |
425 | } |
426 | |
427 | bool aws_array_eq_c_str(const void *const array, const size_t array_len, const char *const c_str) { |
    AWS_PRECONDITION(
        array || (array_len == 0),
        "Input pointer [array] must be non-NULL unless input [array_len] is zero.");
431 | AWS_PRECONDITION(c_str != NULL); |
432 | |
433 | /* Simpler implementation could have been: |
434 | * return aws_array_eq(array, array_len, c_str, strlen(c_str)); |
435 | * but that would have traversed c_str twice. |
436 | * This implementation traverses c_str just once. */ |
437 | |
438 | const uint8_t *array_bytes = array; |
439 | const uint8_t *str_bytes = (const uint8_t *)c_str; |
440 | |
441 | for (size_t i = 0; i < array_len; ++i) { |
442 | uint8_t s = str_bytes[i]; |
443 | if (s == '\0') { |
444 | return false; |
445 | } |
446 | |
447 | if (array_bytes[i] != s) { |
448 | return false; |
449 | } |
450 | } |
451 | |
452 | return str_bytes[array_len] == '\0'; |
453 | } |
454 | |
455 | uint64_t aws_hash_array_ignore_case(const void *array, const size_t len) { |
456 | AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, len)); |
457 | /* FNV-1a: https://en.wikipedia.org/wiki/Fowler%E2%80%93Noll%E2%80%93Vo_hash_function */ |
458 | const uint64_t fnv_offset_basis = 0xcbf29ce484222325ULL; |
459 | const uint64_t fnv_prime = 0x100000001b3ULL; |
460 | |
461 | const uint8_t *i = array; |
462 | const uint8_t *end = i + len; |
463 | |
464 | uint64_t hash = fnv_offset_basis; |
465 | while (i != end) { |
466 | const uint8_t lower = s_tolower_table[*i++]; |
467 | hash ^= lower; |
468 | #ifdef CBMC |
469 | # pragma CPROVER check push |
470 | # pragma CPROVER check disable "unsigned-overflow" |
471 | #endif |
472 | hash *= fnv_prime; |
473 | #ifdef CBMC |
474 | # pragma CPROVER check pop |
475 | #endif |
476 | } |
477 | return hash; |
478 | } |
479 | |
480 | uint64_t aws_hash_byte_cursor_ptr_ignore_case(const void *item) { |
481 | AWS_PRECONDITION(aws_byte_cursor_is_valid(item)); |
482 | const struct aws_byte_cursor *const cursor = item; |
483 | uint64_t rval = aws_hash_array_ignore_case(cursor->ptr, cursor->len); |
484 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(item)); |
485 | return rval; |
486 | } |
487 | |
488 | bool aws_byte_cursor_eq_byte_buf(const struct aws_byte_cursor *const a, const struct aws_byte_buf *const b) { |
489 | AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); |
490 | AWS_PRECONDITION(aws_byte_buf_is_valid(b)); |
491 | bool rv = aws_array_eq(a->ptr, a->len, b->buffer, b->len); |
492 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); |
493 | AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); |
494 | return rv; |
495 | } |
496 | |
497 | bool aws_byte_cursor_eq_byte_buf_ignore_case( |
498 | const struct aws_byte_cursor *const a, |
499 | const struct aws_byte_buf *const b) { |
500 | AWS_PRECONDITION(aws_byte_cursor_is_valid(a)); |
501 | AWS_PRECONDITION(aws_byte_buf_is_valid(b)); |
502 | bool rv = aws_array_eq_ignore_case(a->ptr, a->len, b->buffer, b->len); |
503 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(a)); |
504 | AWS_POSTCONDITION(aws_byte_buf_is_valid(b)); |
505 | return rv; |
506 | } |
507 | |
508 | bool aws_byte_cursor_eq_c_str(const struct aws_byte_cursor *const cursor, const char *const c_str) { |
509 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); |
510 | AWS_PRECONDITION(c_str != NULL); |
511 | bool rv = aws_array_eq_c_str(cursor->ptr, cursor->len, c_str); |
512 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); |
513 | return rv; |
514 | } |
515 | |
516 | bool aws_byte_cursor_eq_c_str_ignore_case(const struct aws_byte_cursor *const cursor, const char *const c_str) { |
517 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); |
518 | AWS_PRECONDITION(c_str != NULL); |
519 | bool rv = aws_array_eq_c_str_ignore_case(cursor->ptr, cursor->len, c_str); |
520 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); |
521 | return rv; |
522 | } |
523 | |
524 | int aws_byte_buf_append(struct aws_byte_buf *to, const struct aws_byte_cursor *from) { |
525 | AWS_PRECONDITION(aws_byte_buf_is_valid(to)); |
526 | AWS_PRECONDITION(aws_byte_cursor_is_valid(from)); |
527 | |
528 | if (to->capacity - to->len < from->len) { |
529 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
530 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
531 | return aws_raise_error(AWS_ERROR_DEST_COPY_TOO_SMALL); |
532 | } |
533 | |
534 | if (from->len > 0) { |
        /* These asserts teach clang-tidy that from->ptr and to->buffer cannot be null for non-empty buffers */
536 | AWS_ASSERT(from->ptr); |
537 | AWS_ASSERT(to->buffer); |
538 | memcpy(to->buffer + to->len, from->ptr, from->len); |
539 | to->len += from->len; |
540 | } |
541 | |
542 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
543 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
544 | return AWS_OP_SUCCESS; |
545 | } |
546 | |
547 | int aws_byte_buf_append_with_lookup( |
548 | struct aws_byte_buf *AWS_RESTRICT to, |
549 | const struct aws_byte_cursor *AWS_RESTRICT from, |
550 | const uint8_t *lookup_table) { |
551 | AWS_PRECONDITION(aws_byte_buf_is_valid(to)); |
552 | AWS_PRECONDITION(aws_byte_cursor_is_valid(from)); |
    AWS_PRECONDITION(
        AWS_MEM_IS_READABLE(lookup_table, 256), "Input array [lookup_table] must be at least 256 bytes long.");
555 | |
556 | if (to->capacity - to->len < from->len) { |
557 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
558 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
559 | return aws_raise_error(AWS_ERROR_DEST_COPY_TOO_SMALL); |
560 | } |
561 | |
562 | for (size_t i = 0; i < from->len; ++i) { |
563 | to->buffer[to->len + i] = lookup_table[from->ptr[i]]; |
564 | } |
565 | |
566 | if (aws_add_size_checked(to->len, from->len, &to->len)) { |
567 | return AWS_OP_ERR; |
568 | } |
569 | |
570 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
571 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
572 | return AWS_OP_SUCCESS; |
573 | } |
574 | |
575 | int aws_byte_buf_append_dynamic(struct aws_byte_buf *to, const struct aws_byte_cursor *from) { |
576 | AWS_PRECONDITION(aws_byte_buf_is_valid(to)); |
577 | AWS_PRECONDITION(aws_byte_cursor_is_valid(from)); |
578 | AWS_ERROR_PRECONDITION(to->allocator); |
579 | |
580 | if (to->capacity - to->len < from->len) { |
581 | /* |
582 | * NewCapacity = Max(OldCapacity * 2, OldCapacity + MissingCapacity) |
583 | */ |
584 | size_t missing_capacity = from->len - (to->capacity - to->len); |
585 | |
586 | size_t required_capacity = 0; |
587 | if (aws_add_size_checked(to->capacity, missing_capacity, &required_capacity)) { |
588 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
589 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
590 | return AWS_OP_ERR; |
591 | } |
592 | |
593 | /* |
594 | * It's ok if this overflows, just clamp to max possible. |
595 | * In theory this lets us still grow a buffer that's larger than 1/2 size_t space |
596 | * at least enough to accommodate the append. |
597 | */ |
598 | size_t growth_capacity = aws_add_size_saturating(to->capacity, to->capacity); |
599 | |
600 | size_t new_capacity = required_capacity; |
601 | if (new_capacity < growth_capacity) { |
602 | new_capacity = growth_capacity; |
603 | } |
604 | |
605 | /* |
606 | * Attempt to resize - we intentionally do not use reserve() in order to preserve |
607 | * the (unlikely) use case of from and to being the same buffer range. |
608 | */ |
609 | |
610 | /* |
611 | * Try the max, but if that fails and the required is smaller, try it in fallback |
612 | */ |
613 | uint8_t *new_buffer = aws_mem_acquire(to->allocator, new_capacity); |
614 | if (new_buffer == NULL) { |
615 | if (new_capacity > required_capacity) { |
616 | new_capacity = required_capacity; |
617 | new_buffer = aws_mem_acquire(to->allocator, new_capacity); |
618 | if (new_buffer == NULL) { |
619 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
620 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
621 | return AWS_OP_ERR; |
622 | } |
623 | } else { |
624 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
625 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
626 | return AWS_OP_ERR; |
627 | } |
628 | } |
629 | |
630 | /* |
631 | * Copy old buffer -> new buffer |
632 | */ |
633 | if (to->len > 0) { |
634 | memcpy(new_buffer, to->buffer, to->len); |
635 | } |
636 | /* |
637 | * Copy what we actually wanted to append in the first place |
638 | */ |
639 | if (from->len > 0) { |
640 | memcpy(new_buffer + to->len, from->ptr, from->len); |
641 | } |
642 | /* |
643 | * Get rid of the old buffer |
644 | */ |
645 | aws_mem_release(to->allocator, to->buffer); |
646 | |
647 | /* |
648 | * Switch to the new buffer |
649 | */ |
650 | to->buffer = new_buffer; |
651 | to->capacity = new_capacity; |
652 | } else { |
653 | if (from->len > 0) { |
            /* These asserts teach clang-tidy that from->ptr and to->buffer cannot be null for non-empty buffers */
655 | AWS_ASSERT(from->ptr); |
656 | AWS_ASSERT(to->buffer); |
657 | memcpy(to->buffer + to->len, from->ptr, from->len); |
658 | } |
659 | } |
660 | |
661 | to->len += from->len; |
662 | |
663 | AWS_POSTCONDITION(aws_byte_buf_is_valid(to)); |
664 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(from)); |
665 | return AWS_OP_SUCCESS; |
666 | } |
667 | |
668 | int aws_byte_buf_reserve(struct aws_byte_buf *buffer, size_t requested_capacity) { |
669 | AWS_ERROR_PRECONDITION(buffer->allocator); |
670 | AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer)); |
671 | |
672 | if (requested_capacity <= buffer->capacity) { |
673 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); |
674 | return AWS_OP_SUCCESS; |
675 | } |
676 | |
677 | if (aws_mem_realloc(buffer->allocator, (void **)&buffer->buffer, buffer->capacity, requested_capacity)) { |
678 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); |
679 | return AWS_OP_ERR; |
680 | } |
681 | |
682 | buffer->capacity = requested_capacity; |
683 | |
684 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); |
685 | return AWS_OP_SUCCESS; |
686 | } |
687 | |
688 | int aws_byte_buf_reserve_relative(struct aws_byte_buf *buffer, size_t additional_length) { |
689 | AWS_ERROR_PRECONDITION(buffer->allocator); |
690 | AWS_ERROR_PRECONDITION(aws_byte_buf_is_valid(buffer)); |
691 | |
692 | size_t requested_capacity = 0; |
693 | if (AWS_UNLIKELY(aws_add_size_checked(buffer->len, additional_length, &requested_capacity))) { |
694 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); |
695 | return AWS_OP_ERR; |
696 | } |
697 | |
698 | return aws_byte_buf_reserve(buffer, requested_capacity); |
699 | } |
700 | |
701 | struct aws_byte_cursor aws_byte_cursor_right_trim_pred( |
702 | const struct aws_byte_cursor *source, |
703 | aws_byte_predicate_fn *predicate) { |
704 | AWS_PRECONDITION(aws_byte_cursor_is_valid(source)); |
705 | AWS_PRECONDITION(predicate != NULL); |
706 | struct aws_byte_cursor trimmed = *source; |
707 | |
708 | while (trimmed.len > 0 && predicate(*(trimmed.ptr + trimmed.len - 1))) { |
709 | --trimmed.len; |
710 | } |
711 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); |
712 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed)); |
713 | return trimmed; |
714 | } |
715 | |
716 | struct aws_byte_cursor aws_byte_cursor_left_trim_pred( |
717 | const struct aws_byte_cursor *source, |
718 | aws_byte_predicate_fn *predicate) { |
719 | AWS_PRECONDITION(aws_byte_cursor_is_valid(source)); |
720 | AWS_PRECONDITION(predicate != NULL); |
721 | struct aws_byte_cursor trimmed = *source; |
722 | |
723 | while (trimmed.len > 0 && predicate(*(trimmed.ptr))) { |
724 | --trimmed.len; |
725 | ++trimmed.ptr; |
726 | } |
727 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); |
728 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&trimmed)); |
729 | return trimmed; |
730 | } |
731 | |
732 | struct aws_byte_cursor aws_byte_cursor_trim_pred( |
733 | const struct aws_byte_cursor *source, |
734 | aws_byte_predicate_fn *predicate) { |
735 | AWS_PRECONDITION(aws_byte_cursor_is_valid(source)); |
736 | AWS_PRECONDITION(predicate != NULL); |
737 | struct aws_byte_cursor left_trimmed = aws_byte_cursor_left_trim_pred(source, predicate); |
738 | struct aws_byte_cursor dest = aws_byte_cursor_right_trim_pred(&left_trimmed, predicate); |
739 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); |
740 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&dest)); |
741 | return dest; |
742 | } |
743 | |
744 | bool aws_byte_cursor_satisfies_pred(const struct aws_byte_cursor *source, aws_byte_predicate_fn *predicate) { |
745 | struct aws_byte_cursor trimmed = aws_byte_cursor_left_trim_pred(source, predicate); |
746 | bool rval = (trimmed.len == 0); |
747 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(source)); |
748 | return rval; |
749 | } |
750 | |
751 | int aws_byte_cursor_compare_lexical(const struct aws_byte_cursor *lhs, const struct aws_byte_cursor *rhs) { |
752 | AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs)); |
753 | AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs)); |
754 | /* make sure we don't pass NULL pointers to memcmp */ |
755 | AWS_PRECONDITION(lhs->ptr != NULL); |
756 | AWS_PRECONDITION(rhs->ptr != NULL); |
757 | size_t comparison_length = lhs->len; |
758 | if (comparison_length > rhs->len) { |
759 | comparison_length = rhs->len; |
760 | } |
761 | |
762 | int result = memcmp(lhs->ptr, rhs->ptr, comparison_length); |
763 | |
764 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs)); |
765 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs)); |
766 | if (result != 0) { |
767 | return result; |
768 | } |
769 | |
770 | if (lhs->len != rhs->len) { |
771 | return comparison_length == lhs->len ? -1 : 1; |
772 | } |
773 | |
774 | return 0; |
775 | } |
776 | |
777 | int aws_byte_cursor_compare_lookup( |
778 | const struct aws_byte_cursor *lhs, |
779 | const struct aws_byte_cursor *rhs, |
780 | const uint8_t *lookup_table) { |
781 | AWS_PRECONDITION(aws_byte_cursor_is_valid(lhs)); |
782 | AWS_PRECONDITION(aws_byte_cursor_is_valid(rhs)); |
783 | AWS_PRECONDITION(AWS_MEM_IS_READABLE(lookup_table, 256)); |
784 | const uint8_t *lhs_curr = lhs->ptr; |
785 | const uint8_t *lhs_end = lhs_curr + lhs->len; |
786 | |
787 | const uint8_t *rhs_curr = rhs->ptr; |
788 | const uint8_t *rhs_end = rhs_curr + rhs->len; |
789 | |
790 | while (lhs_curr < lhs_end && rhs_curr < rhs_end) { |
791 | uint8_t lhc = lookup_table[*lhs_curr]; |
792 | uint8_t rhc = lookup_table[*rhs_curr]; |
793 | |
794 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs)); |
795 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs)); |
796 | if (lhc < rhc) { |
797 | return -1; |
798 | } |
799 | |
800 | if (lhc > rhc) { |
801 | return 1; |
802 | } |
803 | |
804 | lhs_curr++; |
805 | rhs_curr++; |
806 | } |
807 | |
808 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(lhs)); |
809 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(rhs)); |
810 | if (lhs_curr < lhs_end) { |
811 | return 1; |
812 | } |
813 | |
814 | if (rhs_curr < rhs_end) { |
815 | return -1; |
816 | } |
817 | |
818 | return 0; |
819 | } |
820 | |
821 | /** |
 * Creates a byte buffer that wraps an existing null-terminated C string.
 * The returned buffer does not own the memory and has no allocator.
823 | */ |
824 | struct aws_byte_buf aws_byte_buf_from_c_str(const char *c_str) { |
825 | struct aws_byte_buf buf; |
826 | buf.len = (!c_str) ? 0 : strlen(c_str); |
827 | buf.capacity = buf.len; |
828 | buf.buffer = (buf.capacity == 0) ? NULL : (uint8_t *)c_str; |
829 | buf.allocator = NULL; |
830 | AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf)); |
831 | return buf; |
832 | } |
833 | |
834 | struct aws_byte_buf aws_byte_buf_from_array(const void *bytes, size_t len) { |
    AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(bytes, len), "Input array [bytes] must be writable up to [len] bytes.");
836 | struct aws_byte_buf buf; |
837 | buf.buffer = (len > 0) ? (uint8_t *)bytes : NULL; |
838 | buf.len = len; |
839 | buf.capacity = len; |
840 | buf.allocator = NULL; |
841 | AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf)); |
842 | return buf; |
843 | } |
844 | |
845 | struct aws_byte_buf aws_byte_buf_from_empty_array(const void *bytes, size_t capacity) { |
    AWS_PRECONDITION(
        AWS_MEM_IS_WRITABLE(bytes, capacity), "Input array [bytes] must be writable up to [capacity] bytes.");
848 | struct aws_byte_buf buf; |
849 | buf.buffer = (capacity > 0) ? (uint8_t *)bytes : NULL; |
850 | buf.len = 0; |
851 | buf.capacity = capacity; |
852 | buf.allocator = NULL; |
853 | AWS_POSTCONDITION(aws_byte_buf_is_valid(&buf)); |
854 | return buf; |
855 | } |
856 | |
857 | struct aws_byte_cursor aws_byte_cursor_from_buf(const struct aws_byte_buf *const buf) { |
858 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
859 | struct aws_byte_cursor cur; |
860 | cur.ptr = buf->buffer; |
861 | cur.len = buf->len; |
862 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur)); |
863 | return cur; |
864 | } |
865 | |
866 | struct aws_byte_cursor aws_byte_cursor_from_c_str(const char *c_str) { |
867 | struct aws_byte_cursor cur; |
868 | cur.ptr = (uint8_t *)c_str; |
869 | cur.len = (cur.ptr) ? strlen(c_str) : 0; |
870 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur)); |
871 | return cur; |
872 | } |
873 | |
874 | struct aws_byte_cursor aws_byte_cursor_from_array(const void *const bytes, const size_t len) { |
    AWS_PRECONDITION(len == 0 || AWS_MEM_IS_READABLE(bytes, len), "Input array [bytes] must be readable up to [len].");
876 | struct aws_byte_cursor cur; |
877 | cur.ptr = (uint8_t *)bytes; |
878 | cur.len = len; |
879 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&cur)); |
880 | return cur; |
881 | } |
882 | |
883 | #ifdef CBMC |
884 | # pragma CPROVER check push |
885 | # pragma CPROVER check disable "unsigned-overflow" |
886 | #endif |
887 | /** |
 * If index >= bound, bound > (SIZE_MAX / 2), or index > (SIZE_MAX / 2), returns
 * 0. Otherwise, returns a mask with all bits set. This function is designed to
 * return the correct value even under CPU speculation conditions, and is
 * intended to be used for SPECTRE mitigation purposes.
892 | */ |
893 | size_t aws_nospec_mask(size_t index, size_t bound) { |
894 | /* |
895 | * SPECTRE mitigation - we compute a mask that will be zero if len < 0 |
896 | * or len >= buf->len, and all-ones otherwise, and AND it into the index. |
897 | * It is critical that we avoid any branches in this logic. |
898 | */ |
899 | |
900 | /* |
901 | * Hide the index value from the optimizer. This helps ensure that all this |
902 | * logic doesn't get eliminated. |
903 | */ |
904 | #if defined(__GNUC__) || defined(__clang__) |
905 | __asm__ __volatile__("" : "+r" (index)); |
906 | #endif |
907 | #if defined(_MSVC_LANG) |
908 | /* |
909 | * MSVC doesn't have a good way for us to blind the optimizer, and doesn't |
910 | * even have inline asm on x64. Some experimentation indicates that this |
911 | * hack seems to confuse it sufficiently for our needs. |
912 | */ |
913 | *((volatile uint8_t *)&index) += 0; |
914 | #endif |
915 | |
916 | /* |
917 | * If len > (SIZE_MAX / 2), then we can end up with len - buf->len being |
918 | * positive simply because the sign bit got inverted away. So we also check |
919 | * that the sign bit isn't set from the start. |
920 | * |
921 | * We also check that bound <= (SIZE_MAX / 2) to catch cases where the |
922 | * buffer is _already_ out of bounds. |
923 | */ |
924 | size_t negative_mask = index | bound; |
925 | size_t toobig_mask = bound - index - (uintptr_t)1; |
926 | size_t combined_mask = negative_mask | toobig_mask; |
927 | |
928 | /* |
929 | * combined_mask needs to have its sign bit OFF for us to be in range. |
930 | * We'd like to expand this to a mask we can AND into our index, so flip |
931 | * that bit (and everything else), shift it over so it's the only bit in the |
932 | * ones position, and multiply across the entire register. |
933 | * |
934 | * First, extract the (inverse) top bit and move it to the lowest bit. |
935 | * Because there's no standard SIZE_BIT in C99, we'll divide by a mask with |
936 | * just the top bit set instead. |
937 | */ |
938 | |
939 | combined_mask = (~combined_mask) / (SIZE_MAX - (SIZE_MAX >> 1)); |
940 | |
941 | /* |
942 | * Now multiply it to replicate it across all bits. |
943 | * |
944 | * Note that GCC is smart enough to optimize the divide-and-multiply into |
945 | * an arithmetic right shift operation on x86. |
946 | */ |
947 | combined_mask = combined_mask * UINTPTR_MAX; |
948 | |
949 | return combined_mask; |
950 | } |
951 | #ifdef CBMC |
952 | # pragma CPROVER check pop |
953 | #endif |
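
/*
 * Worked example (illustrative), assuming a 64-bit size_t: for index = 3 and
 * bound = 8, negative_mask = 3 | 8 = 11 and toobig_mask = 8 - 3 - 1 = 4, so the
 * top bit of combined_mask is clear; inverting, dividing by 2^63, and
 * multiplying out yields a mask of all ones. For index = 8 and bound = 8,
 * toobig_mask = 8 - 8 - 1 wraps to SIZE_MAX, whose top bit is set, so the
 * result is 0.
 */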
954 | |
955 | /** |
 * Tests if the given aws_byte_cursor has at least len bytes remaining. If so,
 * *cursor is advanced by len bytes (incrementing ->ptr and decrementing ->len),
 * and an aws_byte_cursor referring to the first len bytes of the original
 * *cursor is returned. Otherwise, an aws_byte_cursor with ->ptr = NULL,
 * ->len = 0 is returned.
 *
 * Note that if len is above (SIZE_MAX / 2), this function will also treat it as
 * a buffer overflow, and return an empty cursor without changing *cursor.
964 | */ |
965 | struct aws_byte_cursor aws_byte_cursor_advance(struct aws_byte_cursor *const cursor, const size_t len) { |
966 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); |
967 | struct aws_byte_cursor rv; |
968 | if (cursor->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || len > cursor->len) { |
969 | rv.ptr = NULL; |
970 | rv.len = 0; |
971 | } else { |
972 | rv.ptr = cursor->ptr; |
973 | rv.len = len; |
974 | |
975 | cursor->ptr += len; |
976 | cursor->len -= len; |
977 | } |
978 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); |
979 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv)); |
980 | return rv; |
981 | } |
982 | |
983 | /** |
 * Behaves identically to aws_byte_cursor_advance, but prevents speculative
 * execution from reading through an out-of-bounds pointer (by returning an
 * empty cursor on such speculated paths).
 *
 * This variant should generally be used when 'len' comes from untrusted or
 * data-dependent input, to avoid speculating into a path where cursor->ptr
 * points past the end of the underlying buffer.
991 | */ |
992 | |
993 | struct aws_byte_cursor aws_byte_cursor_advance_nospec(struct aws_byte_cursor *const cursor, size_t len) { |
994 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cursor)); |
995 | |
996 | struct aws_byte_cursor rv; |
997 | |
998 | if (len <= cursor->len && len <= (SIZE_MAX >> 1) && cursor->len <= (SIZE_MAX >> 1)) { |
999 | /* |
1000 | * If we're speculating past a failed bounds check, null out the pointer. This ensures |
1001 | * that we don't try to read past the end of the buffer and leak information about other |
1002 | * memory through timing side-channels. |
1003 | */ |
1004 | uintptr_t mask = aws_nospec_mask(len, cursor->len + 1); |
1005 | |
1006 | /* Make sure we don't speculate-underflow len either */ |
1007 | len = len & mask; |
1008 | cursor->ptr = (uint8_t *)((uintptr_t)cursor->ptr & mask); |
1009 | /* Make sure subsequent nospec accesses don't advance ptr past NULL */ |
1010 | cursor->len = cursor->len & mask; |
1011 | |
1012 | rv.ptr = cursor->ptr; |
1013 | /* Make sure anything acting upon the returned cursor _also_ doesn't advance past NULL */ |
1014 | rv.len = len & mask; |
1015 | |
1016 | cursor->ptr += len; |
1017 | cursor->len -= len; |
1018 | } else { |
1019 | rv.ptr = NULL; |
1020 | rv.len = 0; |
1021 | } |
1022 | |
1023 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cursor)); |
1024 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(&rv)); |
1025 | return rv; |
1026 | } |
1027 | |
1028 | /** |
 * Reads the specified length of data from the byte cursor and copies it to the
 * destination array.
1031 | * |
1032 | * On success, returns true and updates the cursor pointer/length accordingly. |
1033 | * If there is insufficient space in the cursor, returns false, leaving the |
1034 | * cursor unchanged. |
1035 | */ |
1036 | bool aws_byte_cursor_read(struct aws_byte_cursor *AWS_RESTRICT cur, void *AWS_RESTRICT dest, const size_t len) { |
1037 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1038 | AWS_PRECONDITION(AWS_MEM_IS_WRITABLE(dest, len)); |
1039 | struct aws_byte_cursor slice = aws_byte_cursor_advance_nospec(cur, len); |
1040 | |
1041 | if (slice.ptr) { |
1042 | memcpy(dest, slice.ptr, len); |
1043 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1044 | AWS_POSTCONDITION(AWS_MEM_IS_READABLE(dest, len)); |
1045 | return true; |
1046 | } |
1047 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1048 | return false; |
1049 | } |
1050 | |
1051 | /** |
 * Reads as many bytes from the cursor as the destination buffer's capacity, and copies them into the buffer.
1053 | * |
1054 | * On success, returns true and updates the cursor pointer/length accordingly. |
1055 | * If there is insufficient space in the cursor, returns false, leaving the |
1056 | * cursor unchanged. |
1057 | */ |
1058 | bool aws_byte_cursor_read_and_fill_buffer( |
1059 | struct aws_byte_cursor *AWS_RESTRICT cur, |
1060 | struct aws_byte_buf *AWS_RESTRICT dest) { |
1061 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1062 | AWS_PRECONDITION(aws_byte_buf_is_valid(dest)); |
1063 | if (aws_byte_cursor_read(cur, dest->buffer, dest->capacity)) { |
1064 | dest->len = dest->capacity; |
1065 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1066 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
1067 | return true; |
1068 | } |
1069 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1070 | AWS_POSTCONDITION(aws_byte_buf_is_valid(dest)); |
1071 | return false; |
1072 | } |
1073 | |
1074 | /** |
1075 | * Reads a single byte from cursor, placing it in *var. |
1076 | * |
1077 | * On success, returns true and updates the cursor pointer/length accordingly. |
1078 | * If there is insufficient space in the cursor, returns false, leaving the |
1079 | * cursor unchanged. |
1080 | */ |
1081 | bool aws_byte_cursor_read_u8(struct aws_byte_cursor *AWS_RESTRICT cur, uint8_t *AWS_RESTRICT var) { |
1082 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1083 | bool rv = aws_byte_cursor_read(cur, var, 1); |
1084 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1085 | return rv; |
1086 | } |
1087 | |
1088 | /** |
1089 | * Reads a 16-bit value in network byte order from cur, and places it in host |
1090 | * byte order into var. |
1091 | * |
1092 | * On success, returns true and updates the cursor pointer/length accordingly. |
1093 | * If there is insufficient space in the cursor, returns false, leaving the |
1094 | * cursor unchanged. |
1095 | */ |
1096 | bool aws_byte_cursor_read_be16(struct aws_byte_cursor *cur, uint16_t *var) { |
1097 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1098 | AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); |
1099 | bool rv = aws_byte_cursor_read(cur, var, 2); |
1100 | |
1101 | if (AWS_LIKELY(rv)) { |
1102 | *var = aws_ntoh16(*var); |
1103 | } |
1104 | |
1105 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1106 | return rv; |
1107 | } |
1108 | |
1109 | /** |
1110 | * Reads a 32-bit value in network byte order from cur, and places it in host |
1111 | * byte order into var. |
1112 | * |
1113 | * On success, returns true and updates the cursor pointer/length accordingly. |
1114 | * If there is insufficient space in the cursor, returns false, leaving the |
1115 | * cursor unchanged. |
1116 | */ |
1117 | bool aws_byte_cursor_read_be32(struct aws_byte_cursor *cur, uint32_t *var) { |
1118 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1119 | AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); |
1120 | bool rv = aws_byte_cursor_read(cur, var, 4); |
1121 | |
1122 | if (AWS_LIKELY(rv)) { |
1123 | *var = aws_ntoh32(*var); |
1124 | } |
1125 | |
1126 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1127 | return rv; |
1128 | } |
1129 | |
1130 | /** |
 * Reads a 32-bit float in network byte order from cur, and places it in host
1132 | * byte order into var. |
1133 | * |
1134 | * On success, returns true and updates the cursor pointer/length accordingly. |
1135 | * If there is insufficient space in the cursor, returns false, leaving the |
1136 | * cursor unchanged. |
1137 | */ |
1138 | bool aws_byte_cursor_read_float_be32(struct aws_byte_cursor *cur, float *var) { |
1139 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1140 | AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); |
1141 | bool rv = aws_byte_cursor_read(cur, var, sizeof(float)); |
1142 | |
1143 | if (AWS_LIKELY(rv)) { |
1144 | *var = aws_ntohf32(*var); |
1145 | } |
1146 | |
1147 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1148 | return rv; |
1149 | } |
1150 | |
1151 | /** |
 * Reads a 64-bit float in network byte order from cur, and places it in host
1153 | * byte order into var. |
1154 | * |
1155 | * On success, returns true and updates the cursor pointer/length accordingly. |
1156 | * If there is insufficient space in the cursor, returns false, leaving the |
1157 | * cursor unchanged. |
1158 | */ |
1159 | bool aws_byte_cursor_read_float_be64(struct aws_byte_cursor *cur, double *var) { |
1160 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1161 | AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); |
1162 | bool rv = aws_byte_cursor_read(cur, var, sizeof(double)); |
1163 | |
1164 | if (AWS_LIKELY(rv)) { |
1165 | *var = aws_ntohf64(*var); |
1166 | } |
1167 | |
1168 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1169 | return rv; |
1170 | } |
1171 | |
1172 | /** |
1173 | * Reads a 64-bit value in network byte order from cur, and places it in host |
1174 | * byte order into var. |
1175 | * |
1176 | * On success, returns true and updates the cursor pointer/length accordingly. |
1177 | * If there is insufficient space in the cursor, returns false, leaving the |
1178 | * cursor unchanged. |
1179 | */ |
1180 | bool aws_byte_cursor_read_be64(struct aws_byte_cursor *cur, uint64_t *var) { |
1181 | AWS_PRECONDITION(aws_byte_cursor_is_valid(cur)); |
1182 | AWS_PRECONDITION(AWS_OBJECT_PTR_IS_WRITABLE(var)); |
1183 | bool rv = aws_byte_cursor_read(cur, var, sizeof(*var)); |
1184 | |
1185 | if (AWS_LIKELY(rv)) { |
1186 | *var = aws_ntoh64(*var); |
1187 | } |
1188 | |
1189 | AWS_POSTCONDITION(aws_byte_cursor_is_valid(cur)); |
1190 | return rv; |
1191 | } |
1192 | |
1193 | /** |
 * Carves a sub-buffer out of the specified buffer's unused capacity.
 *
 * If the buffer has at least `len' bytes remaining (buffer->capacity - buffer->len >= len),
 * then buffer->len is incremented by len, and an aws_byte_buf is assigned to *output covering
 * the len bytes that immediately follow the buffer's existing contents. The aws_byte_buf at
 * *output will have a null allocator, a zero initial length, and a capacity of 'len'. The
 * function then returns true.
 *
 * If there is insufficient space, then this function nulls all fields in *output and returns
 * false.
1203 | */ |
1204 | bool aws_byte_buf_advance( |
1205 | struct aws_byte_buf *const AWS_RESTRICT buffer, |
1206 | struct aws_byte_buf *const AWS_RESTRICT output, |
1207 | const size_t len) { |
1208 | AWS_PRECONDITION(aws_byte_buf_is_valid(buffer)); |
1209 | AWS_PRECONDITION(aws_byte_buf_is_valid(output)); |
1210 | if (buffer->capacity - buffer->len >= len) { |
1211 | *output = aws_byte_buf_from_array(buffer->buffer + buffer->len, len); |
1212 | buffer->len += len; |
1213 | output->len = 0; |
1214 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); |
1215 | AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); |
1216 | return true; |
1217 | } else { |
1218 | AWS_ZERO_STRUCT(*output); |
1219 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buffer)); |
1220 | AWS_POSTCONDITION(aws_byte_buf_is_valid(output)); |
1221 | return false; |
1222 | } |
1223 | } |
1224 | |
1225 | /** |
 * Writes the specified number of bytes from the source array to the byte buffer.
1227 | * |
1228 | * On success, returns true and updates the buffer length accordingly. |
1229 | * If there is insufficient space in the buffer, returns false, leaving the |
1230 | * buffer unchanged. |
1231 | */ |
1232 | bool aws_byte_buf_write(struct aws_byte_buf *AWS_RESTRICT buf, const uint8_t *AWS_RESTRICT src, size_t len) { |
1233 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
    AWS_PRECONDITION(AWS_MEM_IS_READABLE(src, len), "Input array [src] must be readable up to [len] bytes.");
1235 | |
1236 | if (buf->len > (SIZE_MAX >> 1) || len > (SIZE_MAX >> 1) || buf->len + len > buf->capacity) { |
1237 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
1238 | return false; |
1239 | } |
1240 | |
1241 | memcpy(buf->buffer + buf->len, src, len); |
1242 | buf->len += len; |
1243 | |
1244 | AWS_POSTCONDITION(aws_byte_buf_is_valid(buf)); |
1245 | return true; |
1246 | } |
1247 | |
1248 | /** |
 * Copies all bytes from the source buffer to the destination buffer.
 *
 * On success, returns true and updates the destination buffer's length accordingly.
 * If there is insufficient space in the destination buffer, returns false,
 * leaving it unchanged.
1254 | */ |
1255 | bool aws_byte_buf_write_from_whole_buffer(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_buf src) { |
1256 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1257 | AWS_PRECONDITION(aws_byte_buf_is_valid(&src)); |
1258 | return aws_byte_buf_write(buf, src.buffer, src.len); |
1259 | } |
1260 | |
1261 | /** |
 * Copies all bytes from the source cursor to the destination buffer.
 *
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1267 | */ |
1268 | bool aws_byte_buf_write_from_whole_cursor(struct aws_byte_buf *AWS_RESTRICT buf, struct aws_byte_cursor src) { |
1269 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1270 | AWS_PRECONDITION(aws_byte_cursor_is_valid(&src)); |
1271 | return aws_byte_buf_write(buf, src.ptr, src.len); |
1272 | } |
1273 | |
1274 | /** |
 * Copies one byte to the buffer.
 *
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1282 | */ |
1283 | bool aws_byte_buf_write_u8(struct aws_byte_buf *AWS_RESTRICT buf, uint8_t c) { |
1284 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1285 | return aws_byte_buf_write(buf, &c, 1); |
1286 | } |
1287 | |
1288 | /** |
1289 | * Writes a 16-bit integer in network byte order (big endian) to buffer. |
1290 | * |
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1294 | */ |
1295 | bool aws_byte_buf_write_be16(struct aws_byte_buf *buf, uint16_t x) { |
1296 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1297 | x = aws_hton16(x); |
1298 | return aws_byte_buf_write(buf, (uint8_t *)&x, 2); |
1299 | } |
1300 | |
1301 | /** |
1302 | * Writes a 32-bit integer in network byte order (big endian) to buffer. |
1303 | * |
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1307 | */ |
1308 | bool aws_byte_buf_write_be32(struct aws_byte_buf *buf, uint32_t x) { |
1309 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1310 | x = aws_hton32(x); |
1311 | return aws_byte_buf_write(buf, (uint8_t *)&x, 4); |
1312 | } |
1313 | |
1314 | /** |
1315 | * Writes a 32-bit float in network byte order (big endian) to buffer. |
1316 | * |
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1320 | */ |
1321 | bool aws_byte_buf_write_float_be32(struct aws_byte_buf *buf, float x) { |
1322 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1323 | x = aws_htonf32(x); |
1324 | return aws_byte_buf_write(buf, (uint8_t *)&x, 4); |
1325 | } |
1326 | |
1327 | /** |
1328 | * Writes a 64-bit integer in network byte order (big endian) to buffer. |
1329 | * |
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1333 | */ |
1334 | bool aws_byte_buf_write_be64(struct aws_byte_buf *buf, uint64_t x) { |
1335 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1336 | x = aws_hton64(x); |
1337 | return aws_byte_buf_write(buf, (uint8_t *)&x, 8); |
1338 | } |
1339 | |
1340 | /** |
1341 | * Writes a 64-bit float in network byte order (big endian) to buffer. |
1342 | * |
 * On success, returns true and updates the buffer's length accordingly.
 * If there is insufficient space in the buffer, returns false, leaving the
 * buffer unchanged.
1346 | */ |
1347 | bool aws_byte_buf_write_float_be64(struct aws_byte_buf *buf, double x) { |
1348 | AWS_PRECONDITION(aws_byte_buf_is_valid(buf)); |
1349 | x = aws_htonf64(x); |
1350 | return aws_byte_buf_write(buf, (uint8_t *)&x, 8); |
1351 | } |
1352 | |
1353 | int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_cursor *from_and_update) { |
1354 | AWS_PRECONDITION(aws_byte_buf_is_valid(to)); |
1355 | AWS_PRECONDITION(aws_byte_cursor_is_valid(from_and_update)); |
1356 | |
1357 | if (aws_byte_buf_append(to, from_and_update)) { |
1358 | return AWS_OP_ERR; |
1359 | } |
1360 | |
1361 | from_and_update->ptr = to->buffer + (to->len - from_and_update->len); |
1362 | return AWS_OP_SUCCESS; |
1363 | } |
1364 | |