1/*****************************************************************************
2
3Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved.
4Copyright (c) 2018, MariaDB Corporation.
5
6This program is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free Software
8Foundation; version 2 of the License.
9
10This program is distributed in the hope that it will be useful, but WITHOUT
11ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
12FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
13
14You should have received a copy of the GNU General Public License along with
15this program; if not, write to the Free Software Foundation, Inc.,
1651 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
17
18*****************************************************************************/
19
20/**************************************************//**
21@file include/dyn0buf.h
22The dynamically allocated buffer implementation
23
24Created 2013-03-16 Sunny Bains
25*******************************************************/
26
27#ifndef dyn0buf_h
28#define dyn0buf_h
29
30#include "univ.i"
31#include "ut0lst.h"
32#include "mem0mem.h"
33#include "dyn0types.h"
34
35/** Class that manages dynamic buffers. It uses a UT_LIST of
36mtr_buf_t::block_t instances. We don't use STL containers in
37order to avoid the overhead of heap calls. Using a custom memory
38allocator doesn't solve the problem either because we have to get
39the memory from somewhere. We can't use the block_t::m_data as the
40backend for the custom allocator because we would like the data in
41the blocks to be contiguous. */
class mtr_buf_t {
public:

	class block_t;

	/** Intrusive list node; links a block_t into m_list */
	typedef UT_LIST_NODE_T(block_t) block_node_t;
	/** Base node of the doubly linked list of blocks */
	typedef UT_LIST_BASE_NODE_T(block_t) block_list_t;

	/** Data bytes available per block:
	SIZE - sizeof(m_node) + sizeof(m_used) */
	enum { MAX_DATA_SIZE = DYN_ARRAY_DATA_SIZE
	       - sizeof(block_node_t) + sizeof(ib_uint32_t) };

	/** One contiguous storage chunk of the buffer. Blocks are
	chained into m_list as the buffer grows. */
	class block_t {
	public:

		block_t()
		{
			/* used() masks m_used with ~DYN_BLOCK_FULL_FLAG;
			this bound presumably keeps valid byte counts
			clear of the flag bit -- confirm against the
			value of DYN_BLOCK_FULL_FLAG in dyn0types.h */
			compile_time_assert(MAX_DATA_SIZE <= (2 << 15));
			init();
		}

		/**
		Gets the number of used bytes in a block.
		@return number of bytes used */
		ulint used() const
			MY_ATTRIBUTE((warn_unused_result))
		{
			/* Mask off DYN_BLOCK_FULL_FLAG, which may be set
			in m_used when the block becomes full */
			return(static_cast<ulint>(m_used & ~DYN_BLOCK_FULL_FLAG));
		}

		/**
		Gets pointer to the start of data.
		@return pointer to data */
		byte* start()
			MY_ATTRIBUTE((warn_unused_result))
		{
			return(m_data);
		}

		/**
		@return start of data - non const version */
		byte* begin()
			MY_ATTRIBUTE((warn_unused_result))
		{
			return(m_data);
		}

		/**
		@return end of used data - non const version */
		byte* end()
			MY_ATTRIBUTE((warn_unused_result))
		{
			return(begin() + m_used);
		}

		/**
		@return start of data - const version */
		const byte* begin() const
			MY_ATTRIBUTE((warn_unused_result))
		{
			return(m_data);
		}

		/**
		@return end of used data - const version */
		const byte* end() const
			MY_ATTRIBUTE((warn_unused_result))
		{
			return(begin() + m_used);
		}

	private:
		/**
		Reserve size bytes at the end of the block. The caller
		(mtr_buf_t::push) must already have checked that the
		block has enough free space.
		@param size	number of bytes to reserve
		@return pointer to start of reserved space */
		template <typename Type>
		Type push(uint32_t size)
		{
			Type	ptr = reinterpret_cast<Type>(end());

			m_used += size;
			ut_ad(m_used <= uint32_t(MAX_DATA_SIZE));

			return(ptr);
		}

		/**
		Finish a write that was begun with mtr_buf_t::open():
		set the used byte count from the end pointer supplied
		by the caller.
		@param ptr	one past the last byte actually written */
		void close(const byte* ptr)
		{
			/* Check that it is within bounds; m_buf_end was
			set to the reservation end by mtr_buf_t::open()
			(debug builds only) */
			ut_ad(ptr >= begin());
			ut_ad(ptr <= begin() + m_buf_end);

			/* We have done the boundary check above */
			m_used = uint32_t(ptr - begin());

			ut_ad(m_used <= MAX_DATA_SIZE);
			ut_d(m_buf_end = 0);
		}

		/**
		Initialise the block to the empty state */
		void init()
		{
			m_used = 0;
			ut_d(m_buf_end = 0);
			ut_d(m_magic_n = DYN_BLOCK_MAGIC_N);
		}
	private:
#ifdef UNIV_DEBUG
		/** If opened then this is the buffer end offset, else 0 */
		ulint		m_buf_end;

		/** Magic number (DYN_BLOCK_MAGIC_N) */
		ulint		m_magic_n;
#endif /* UNIV_DEBUG */

		/** Storage */
		byte		m_data[MAX_DATA_SIZE];

		/** Doubly linked list node. */
		block_node_t	m_node;

		/** number of data bytes used in this block;
		DYN_BLOCK_FULL_FLAG is set when the block becomes full */
		uint32_t	m_used;

		friend class mtr_buf_t;
	};

	/** Default constructor: starts with the embedded first block
	only, so small buffers need no heap allocation. */
	mtr_buf_t()
		:
		m_heap(),
		m_size()
	{
		UT_LIST_INIT(m_list, &block_t::m_node);
		push_back(&m_first_block);
	}

	/** Destructor: releases any heap-allocated blocks. */
	~mtr_buf_t()
	{
		erase();
	}

	/** Reset the buffer vector: free all heap-allocated blocks and
	return to the single embedded first block with size 0. */
	void erase()
	{
		if (m_heap != NULL) {
			/* Frees every block that add_block() allocated */
			mem_heap_free(m_heap);
			m_heap = NULL;

			/* Initialise the list and add the first block. */
			UT_LIST_INIT(m_list, &block_t::m_node);
			push_back(&m_first_block);
		} else {
			m_first_block.init();
			ut_ad(UT_LIST_GET_LEN(m_list) == 1);
		}

		m_size = 0;
	}

	/**
	Makes room on top and returns a pointer to a buffer in it. After
	copying the elements, the caller must close the buffer using close().
	@param size	in bytes of the buffer; MUST be > 0 and
			<= MAX_DATA_SIZE!
	@return pointer to the buffer */
	byte* open(ulint size)
		MY_ATTRIBUTE((warn_unused_result))
	{
		ut_ad(size > 0);
		ut_ad(size <= MAX_DATA_SIZE);

		block_t*	block;

		block = has_space(size) ? back() : add_block();

		ut_ad(block->m_used <= MAX_DATA_SIZE);
		/* Record the reservation end for the bounds check in
		block_t::close() (debug builds only) */
		ut_d(block->m_buf_end = block->m_used + size);

		return(block->end());
	}

	/**
	Closes the buffer returned by open.
	@param ptr	end of used space */
	void close(const byte* ptr)
	{
		ut_ad(UT_LIST_GET_LEN(m_list) > 0);
		block_t*	block = back();

		/* The caller may have written fewer bytes than were
		reserved in open(); recompute the total from the block's
		final used count */
		m_size -= block->used();

		block->close(ptr);

		m_size += block->used();
	}

	/**
	Makes room on top and returns a pointer to the added element.
	The caller must copy the element to the pointer returned.
	@param size	in bytes of the element; MUST be > 0 and
			<= MAX_DATA_SIZE!
	@return pointer to the element */
	template <typename Type>
	Type push(uint32_t size)
	{
		ut_ad(size > 0);
		ut_ad(size <= MAX_DATA_SIZE);

		block_t*	block;

		block = has_space(size) ? back() : add_block();

		m_size += size;

		/* See ISO C++03 14.2/4 for why "template" is required. */

		return(block->template push<Type>(size));
	}

	/**
	Pushes n bytes, splitting across blocks as necessary.
	@param ptr	bytes to write
	@param len	number of bytes to write */
	void push(const byte* ptr, uint32_t len)
	{
		while (len > 0) {
			/* Each push<byte*>() reserves at most one
			block's worth; it starts a new block if the
			current one lacks space */
			uint32_t	n_copied = std::min(len,
						uint32_t(MAX_DATA_SIZE));
			::memmove(push<byte*>(n_copied), ptr, n_copied);

			ptr += n_copied;
			len -= n_copied;
		}
	}

	/**
	Returns a pointer to an element in the buffer. const version.
	@param pos	position of element in bytes from start
	@return pointer to element */
	template <typename Type>
	const Type at(ulint pos) const
	{
		block_t*	block = const_cast<block_t*>(
			const_cast<mtr_buf_t*>(this)->find(pos));

		/* find() has made pos relative to the block start */
		return(reinterpret_cast<Type>(block->begin() + pos));
	}

	/**
	Returns a pointer to an element in the buffer. non const version.
	@param pos	position of element in bytes from start
	@return pointer to element */
	template <typename Type>
	Type at(ulint pos)
	{
		block_t*	block = const_cast<block_t*>(find(pos));

		/* find() has made pos relative to the block start */
		return(reinterpret_cast<Type>(block->begin() + pos));
	}

	/**
	Returns the size of the total stored data.
	@return data size in bytes */
	ulint size() const
		MY_ATTRIBUTE((warn_unused_result))
	{
#ifdef UNIV_DEBUG
		/* Cross-check the cached total against the per-block
		used counts */
		ulint	total_size = 0;

		for (const block_t* block = UT_LIST_GET_FIRST(m_list);
		     block != NULL;
		     block = UT_LIST_GET_NEXT(m_node, block)) {

			total_size += block->used();
		}

		ut_ad(total_size == m_size);
#endif /* UNIV_DEBUG */
		return(m_size);
	}

	/**
	Iterate over each block and call the functor.
	@param functor	called with each const block_t*; returning
			false stops the iteration
	@return false if iteration was terminated. */
	template <typename Functor>
	bool for_each_block(Functor& functor) const
	{
		for (const block_t* block = UT_LIST_GET_FIRST(m_list);
		     block != NULL;
		     block = UT_LIST_GET_NEXT(m_node, block)) {

			if (!functor(block)) {
				return(false);
			}
		}

		return(true);
	}

	/**
	Iterate over all the blocks in reverse and call the functor.
	@param functor	called with each block_t*; returning false
			stops the iteration
	@return false if iteration was terminated. */
	template <typename Functor>
	bool for_each_block_in_reverse(Functor& functor) const
	{
		for (block_t* block = UT_LIST_GET_LAST(m_list);
		     block != NULL;
		     block = UT_LIST_GET_PREV(m_node, block)) {

			if (!functor(block)) {
				return(false);
			}
		}

		return(true);
	}

	/**
	@return the first block */
	block_t* front()
		MY_ATTRIBUTE((warn_unused_result))
	{
		ut_ad(UT_LIST_GET_LEN(m_list) > 0);
		return(UT_LIST_GET_FIRST(m_list));
	}

	/**
	@return true if m_first_block block was not filled fully */
	bool is_small() const
		MY_ATTRIBUTE((warn_unused_result))
	{
		/* The heap is only created once the embedded first
		block overflows (see add_block()) */
		return(m_heap == NULL);
	}

private:
	// Disable copying
	mtr_buf_t(const mtr_buf_t&);
	mtr_buf_t& operator=(const mtr_buf_t&);

	/**
	Add the block to the end of the list.
	@param block	block to (re)initialise and append */
	void push_back(block_t* block)
	{
		block->init();

		UT_LIST_ADD_LAST(m_list, block);
	}

	/** @return the last block in the list */
	block_t* back() const
	{
		return(UT_LIST_GET_LAST(m_list));
	}

	/*
	@return true if request can be fulfilled from the last block */
	bool has_space(ulint size) const
	{
		return(back()->m_used + size <= MAX_DATA_SIZE);
	}

	/*
	@return true if request can be fulfilled from the last block */
	bool has_space(ulint size)
	{
		return(back()->m_used + size <= MAX_DATA_SIZE);
	}

	/** Find the block that contains the pos.
	@param pos	absolute offset, it is updated to make it relative
			to the block
	@return the block containing the pos. */
	block_t* find(ulint& pos)
	{
		block_t*	block;

		ut_ad(UT_LIST_GET_LEN(m_list) > 0);

		for (block = UT_LIST_GET_FIRST(m_list);
		     block != NULL;
		     block = UT_LIST_GET_NEXT(m_node, block)) {

			if (pos < block->used()) {
				break;
			}

			pos -= block->used();
		}

		ut_ad(block != NULL);
		ut_ad(block->used() >= pos);

		return(block);
	}

	/**
	Allocate and add a new block to m_list, creating the heap on
	first use.
	@return the newly appended (empty) block */
	block_t* add_block()
	{
		block_t*	block;

		if (m_heap == NULL) {
			m_heap = mem_heap_create(sizeof(*block));
		}

		block = reinterpret_cast<block_t*>(
			mem_heap_alloc(m_heap, sizeof(*block)));

		push_back(block);

		return(block);
	}

private:
	/** Heap to use for memory allocation; NULL until the first
	overflow block is added by add_block() */
	mem_heap_t*		m_heap;

	/** Allocated blocks */
	block_list_t		m_list;

	/** Total size used by all blocks */
	ulint			m_size;

	/** The default block, should always be the first element. This
	is for backwards compatibility and to avoid an extra heap allocation
	for small REDO log records */
	block_t			m_first_block;
};
473
474/** mtr_buf_t copier */
475struct mtr_buf_copy_t {
476 /** The copied buffer */
477 mtr_buf_t m_buf;
478
479 /** Append a block to the redo log buffer.
480 @return whether the appending should continue (always true here) */
481 bool operator()(const mtr_buf_t::block_t* block)
482 {
483 byte* buf = m_buf.open(block->used());
484 memcpy(buf, block->begin(), block->used());
485 m_buf.close(buf + block->used());
486 return(true);
487 }
488};
489
490#endif /* dyn0buf_h */
491