/*
 * Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

#include <aws/common/ring_buffer.h>

#include <aws/common/byte_buf.h>

#ifdef CBMC
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order)                                         \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);                                            \
        assert(__CPROVER_POINTER_OBJECT(dest_ptr) == __CPROVER_POINTER_OBJECT(ring_buf->allocation));                 \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, dest_ptr));
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order)                                         \
        assert(aws_ring_buffer_check_atomic_ptr(ring_buf, src_ptr));                                                  \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#else
#    define AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, atomic_ptr, memory_order)                                         \
        dest_ptr = aws_atomic_load_ptr_explicit(atomic_ptr, memory_order);
#    define AWS_ATOMIC_STORE_PTR(ring_buf, atomic_ptr, src_ptr, memory_order)                                         \
        aws_atomic_store_ptr_explicit(atomic_ptr, src_ptr, memory_order);
#endif
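/*
 * Note on memory ordering: tail is stored with release semantics (in aws_ring_buffer_release, and
 * when the ring is reset during an acquire on an empty ring) and loaded with acquire semantics, so
 * any work done with a buffer before it was released is visible before its memory is handed out
 * again. head is only read and written on the acquiring side, so relaxed ordering suffices for it.
 */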
#define AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, dest_ptr)                                                                   \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->tail, aws_memory_order_acquire);
#define AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, src_ptr)                                                                   \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->tail, src_ptr, aws_memory_order_release);
#define AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, dest_ptr)                                                                   \
    AWS_ATOMIC_LOAD_PTR(ring_buf, dest_ptr, &(ring_buf)->head, aws_memory_order_relaxed);
#define AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, src_ptr)                                                                   \
    AWS_ATOMIC_STORE_PTR(ring_buf, &(ring_buf)->head, src_ptr, aws_memory_order_relaxed);

int aws_ring_buffer_init(struct aws_ring_buffer *ring_buf, struct aws_allocator *allocator, size_t size) {
    AWS_PRECONDITION(ring_buf != NULL);
    AWS_PRECONDITION(allocator != NULL);
    AWS_PRECONDITION(size > 0);

    AWS_ZERO_STRUCT(*ring_buf);

    ring_buf->allocation = aws_mem_acquire(allocator, size);

    if (!ring_buf->allocation) {
        return AWS_OP_ERR;
    }

    ring_buf->allocator = allocator;
    aws_atomic_init_ptr(&ring_buf->head, ring_buf->allocation);
    aws_atomic_init_ptr(&ring_buf->tail, ring_buf->allocation);
    ring_buf->allocation_end = ring_buf->allocation + size;

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    return AWS_OP_SUCCESS;
}

void aws_ring_buffer_clean_up(struct aws_ring_buffer *ring_buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    if (ring_buf->allocation) {
        aws_mem_release(ring_buf->allocator, ring_buf->allocation);
    }

    AWS_ZERO_STRUCT(*ring_buf);
}

int aws_ring_buffer_acquire(struct aws_ring_buffer *ring_buf, size_t requested_size, struct aws_byte_buf *dest) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));
    AWS_ERROR_PRECONDITION(requested_size != 0);

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* this branch means we don't have any vended buffers. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;

        if (requested_size > ring_space) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }

    /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
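        /* leave a one-byte gap between head and tail: if head caught up to tail, head == tail
         * would be indistinguishable from the "no vended buffers" case above. */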
        size_t space = tail_cpy - head_cpy - 1;

        if (space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* After N wraps */
    } else if (tail_cpy < head_cpy) {
        /* prefer the head space for efficiency. */
        if ((size_t)(ring_buf->allocation_end - head_cpy) >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

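        /* otherwise wrap around to the front of the ring; the strict '>' keeps the new head
         * strictly below tail, preserving the gap that distinguishes full from empty. */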
        if ((size_t)(tail_cpy - ring_buf->allocation) > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}

int aws_ring_buffer_acquire_up_to(
    struct aws_ring_buffer *ring_buf,
    size_t minimum_size,
    size_t requested_size,
    struct aws_byte_buf *dest) {
    AWS_PRECONDITION(requested_size >= minimum_size);
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_PRECONDITION(aws_byte_buf_is_valid(dest));

    if (requested_size == 0 || minimum_size == 0 || !ring_buf || !dest) {
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    uint8_t *tail_cpy;
    uint8_t *head_cpy;
    AWS_ATOMIC_LOAD_TAIL_PTR(ring_buf, tail_cpy);
    AWS_ATOMIC_LOAD_HEAD_PTR(ring_buf, head_cpy);

    /* this branch means we don't have any vended buffers. */
    if (head_cpy == tail_cpy) {
        size_t ring_space = ring_buf->allocation_end - ring_buf->allocation;

        size_t allocation_size = ring_space > requested_size ? requested_size : ring_space;

        if (allocation_size < minimum_size) {
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return aws_raise_error(AWS_ERROR_OOM);
        }

        /* go as big as we can. */
        /* we don't have any vended buffers, so this should be safe. */
        AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + allocation_size);
        AWS_ATOMIC_STORE_TAIL_PTR(ring_buf, ring_buf->allocation);
        *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, allocation_size);
        AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
        AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
        return AWS_OP_SUCCESS;
    }
    /* you'll constantly bounce between the next two branches as the ring buffer is traversed. */
    /* after N + 1 wraps */
    if (tail_cpy > head_cpy) {
        size_t space = tail_cpy - head_cpy;
        /* space == 0 shouldn't be possible here: tail > head implies at least one byte between them. */
        AWS_ASSERT(space);
        space -= 1;

        size_t returnable_size = space > requested_size ? requested_size : space;

        if (returnable_size >= minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + returnable_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, returnable_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
        /* after N wraps */
    } else if (tail_cpy < head_cpy) {
        size_t head_space = ring_buf->allocation_end - head_cpy;
        size_t tail_space = tail_cpy - ring_buf->allocation;

        /* if you can vend the whole thing do it. Also prefer head space to tail space. */
        if (head_space >= requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + requested_size);
            *dest = aws_byte_buf_from_empty_array(head_cpy, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        if (tail_space > requested_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + requested_size);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, requested_size);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

        /* now vend as much as possible, once again preferring head space. */
        if (head_space >= minimum_size && head_space >= tail_space) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, head_cpy + head_space);
            *dest = aws_byte_buf_from_empty_array(head_cpy, head_space);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }

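        /* fall back to the front of the ring, capping at tail_space - 1 so the new head stays
         * strictly below tail and a full ring is never mistaken for an empty one. */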
        if (tail_space > minimum_size) {
            AWS_ATOMIC_STORE_HEAD_PTR(ring_buf, ring_buf->allocation + tail_space - 1);
            *dest = aws_byte_buf_from_empty_array(ring_buf->allocation, tail_space - 1);
            AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
            AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
            return AWS_OP_SUCCESS;
        }
    }

    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buf));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(dest));
    return aws_raise_error(AWS_ERROR_OOM);
}

static inline bool s_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
#ifdef CBMC
    /* only continue if buf points-into ring_buffer because comparison of pointers to different objects is undefined
     * (C11 6.5.8) */
    if ((__CPROVER_POINTER_OBJECT(buf->buffer) != __CPROVER_POINTER_OBJECT(ring_buffer->allocation)) ||
        (__CPROVER_POINTER_OBJECT(buf->buffer) != __CPROVER_POINTER_OBJECT(ring_buffer->allocation_end))) {
        return false;
    }
#endif
    return buf->buffer && ring_buffer->allocation && ring_buffer->allocation_end &&
           buf->buffer >= ring_buffer->allocation && buf->buffer + buf->capacity <= ring_buffer->allocation_end;
}

void aws_ring_buffer_release(struct aws_ring_buffer *ring_buffer, struct aws_byte_buf *buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
    AWS_PRECONDITION(s_buf_belongs_to_pool(ring_buffer, buf));
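    /* moving tail just past this buffer implicitly returns every region vended before it, so
     * buffers are expected to be released in the order they were acquired (see the realloc
     * assert below about allocation ordering). */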
    AWS_ATOMIC_STORE_TAIL_PTR(ring_buffer, buf->buffer + buf->capacity);
    AWS_ZERO_STRUCT(*buf);
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
}

bool aws_ring_buffer_buf_belongs_to_pool(const struct aws_ring_buffer *ring_buffer, const struct aws_byte_buf *buf) {
    AWS_PRECONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
    bool rval = s_buf_belongs_to_pool(ring_buffer, buf);
    AWS_POSTCONDITION(aws_ring_buffer_is_valid(ring_buffer));
    AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
    return rval;
}

/* Ring buffer allocator implementation */
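/*
 * Each allocation handed out by the ring buffer allocator is prefixed with a size_t header that
 * records the capacity of the underlying aws_byte_buf, so mem_release can reconstruct the
 * byte_buf and return the whole region to the ring buffer.
 */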
static void *s_ring_buffer_mem_acquire(struct aws_allocator *allocator, size_t size) {
    struct aws_ring_buffer *buffer = allocator->impl;
    struct aws_byte_buf buf;
    AWS_ZERO_STRUCT(buf);
    /* allocate extra space for the size */
    if (aws_ring_buffer_acquire(buffer, size + sizeof(size_t), &buf)) {
        return NULL;
    }
    /* store the size ahead of the allocation */
    *((size_t *)buf.buffer) = buf.capacity;
    return buf.buffer + sizeof(size_t);
}

static void s_ring_buffer_mem_release(struct aws_allocator *allocator, void *ptr) {
    /* back up to where the size is stored */
    const void *addr = ((uint8_t *)ptr - sizeof(size_t));
    const size_t size = *((size_t *)addr);

    struct aws_byte_buf buf = aws_byte_buf_from_array(addr, size);
    buf.allocator = allocator;

    struct aws_ring_buffer *buffer = allocator->impl;
    aws_ring_buffer_release(buffer, &buf);
}

static void *s_ring_buffer_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) {
    void *mem = s_ring_buffer_mem_acquire(allocator, num * size);
    if (!mem) {
        return NULL;
    }
    memset(mem, 0, num * size);
    return mem;
}

static void *s_ring_buffer_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) {
    (void)allocator;
    (void)ptr;
    (void)old_size;
    (void)new_size;
    AWS_FATAL_ASSERT(!"ring_buffer_allocator does not support realloc, as it breaks allocation ordering");
    return NULL;
}

int aws_ring_buffer_allocator_init(struct aws_allocator *allocator, struct aws_ring_buffer *ring_buffer) {
    if (allocator == NULL || ring_buffer == NULL) {
        return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
    }

    allocator->impl = ring_buffer;
    allocator->mem_acquire = s_ring_buffer_mem_acquire;
    allocator->mem_release = s_ring_buffer_mem_release;
    allocator->mem_calloc = s_ring_buffer_mem_calloc;
    allocator->mem_realloc = s_ring_buffer_mem_realloc;
    return AWS_OP_SUCCESS;
}

void aws_ring_buffer_allocator_clean_up(struct aws_allocator *allocator) {
    AWS_ZERO_STRUCT(*allocator);
}