/*****************************************************************************

Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Copyright (c) 2014, 2018, MariaDB Corporation.

Portions of this file contain modifications contributed and copyrighted by
Google, Inc. Those modifications are gratefully acknowledged and are described
briefly in the InnoDB documentation. The contributions by Google are
incorporated with their permission, and subject to the conditions contained in
the file COPYING.Google.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file include/buf0buf.ic
The database buffer buf_pool

Created 11/5/1995 Heikki Tuuri
*******************************************************/

#include "mtr0mtr.h"
#include "buf0flu.h"
#include "buf0lru.h"
#include "buf0rea.h"
#include "sync0debug.h"
#include "fsp0types.h"
#include "ut0new.h"

/** A chunk of buffers. The buffer pool is allocated in chunks. */
struct buf_chunk_t{
	ulint		size;		/*!< size of frames[] and blocks[] */
	unsigned char*	mem;		/*!< pointer to the memory area which
					was allocated for the frames */
	ut_new_pfx_t	mem_pfx;	/*!< Auxiliary structure, describing
					"mem". It is filled by the allocator's
					alloc method and later passed to the
					deallocate method. */
	buf_block_t*	blocks;		/*!< array of buffer control blocks */

	/** Get the size of 'mem' in bytes. */
	size_t mem_size() const {
		return(mem_pfx.m_size);
	}
};

/*********************************************************************//**
Gets the current size of the buffer pool in bytes.
@return size in bytes */
UNIV_INLINE
ulint
buf_pool_get_curr_size(void)
/*========================*/
{
	return(srv_buf_pool_curr_size);
}

/********************************************************************//**
Calculates the index of a buffer pool instance within the buf_pool[] array.
@return the position of the buffer pool in buf_pool[] */
UNIV_INLINE
unsigned
buf_pool_index(
/*===========*/
	const buf_pool_t*	buf_pool)	/*!< in: buffer pool */
{
	unsigned	i = unsigned(buf_pool - buf_pool_ptr);
	ut_ad(i < MAX_BUFFER_POOLS);
	ut_ad(i < srv_buf_pool_instances);
	return(i);
}

/******************************************************************//**
Returns the buffer pool instance given a page instance.
@return buf_pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_bpage(
/*================*/
	const buf_page_t*	bpage)	/*!< in: buffer pool page */
{
	ut_ad(bpage->buf_pool_index < srv_buf_pool_instances);
	return(&buf_pool_ptr[bpage->buf_pool_index]);
}

/******************************************************************//**
Returns the buffer pool instance given a block instance.
@return buf_pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_block(
/*================*/
	const buf_block_t*	block)	/*!< in: block */
{
	return(buf_pool_from_bpage(&block->page));
}

/*********************************************************************//**
Gets the current size of the buffer pool in pages.
@return size in pages */
UNIV_INLINE
ulint
buf_pool_get_n_pages(void)
/*======================*/
{
	return buf_pool_get_curr_size() >> srv_page_size_shift;
}

/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
UNIV_INLINE
unsigned
buf_page_get_freed_page_clock(
/*==========================*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	/* This is sometimes read without holding buf_pool->mutex. */
	return(bpage->freed_page_clock);
}

/********************************************************************//**
Reads the freed_page_clock of a buffer block.
@return freed_page_clock */
UNIV_INLINE
unsigned
buf_block_get_freed_page_clock(
/*===========================*/
	const buf_block_t*	block)	/*!< in: block */
{
	return(buf_page_get_freed_page_clock(&block->page));
}

/********************************************************************//**
Tells if a block is still close enough to the MRU end of the LRU list,
meaning that it is not in danger of getting evicted and also implying
that it has been accessed recently.
Note that this is a heuristic only and does not reserve the buffer pool
mutex.
@return TRUE if block is close to MRU end of LRU */
UNIV_INLINE
ibool
buf_page_peek_if_young(
/*===================*/
	const buf_page_t*	bpage)	/*!< in: block */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

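	/* Heuristic: the block is still considered "young" if fewer than
	curr_size * (BUF_LRU_OLD_RATIO_DIV - LRU_old_ratio)
	/ (BUF_LRU_OLD_RATIO_DIV * 4) pages (roughly a quarter of the
	"new" blocks area) have been freed from the LRU list since this
	block was last made young, i.e. since bpage->freed_page_clock
	was last recorded. */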
	/* FIXME: bpage->freed_page_clock is 31 bits */
	return((buf_pool->freed_page_clock & ((1UL << 31) - 1))
	       < (bpage->freed_page_clock
		  + (buf_pool->curr_size
		     * (BUF_LRU_OLD_RATIO_DIV - buf_pool->LRU_old_ratio)
		     / (BUF_LRU_OLD_RATIO_DIV * 4))));
}

/********************************************************************//**
Recommends a move of a block to the start of the LRU list if there is
a danger of it being dropped from the buffer pool. NOTE: does not reserve
the buffer pool mutex.
@return TRUE if should be made younger */
UNIV_INLINE
ibool
buf_page_peek_if_too_old(
/*=====================*/
	const buf_page_t*	bpage)	/*!< in: block to make younger */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	if (buf_pool->freed_page_clock == 0) {
		/* If eviction has not started yet, do not update the
		statistics or move blocks in the LRU list. This is
		either the warm-up phase or an in-memory workload. */
		return(FALSE);
	} else if (buf_LRU_old_threshold_ms && bpage->old) {
		unsigned	access_time = buf_page_is_accessed(bpage);

		/* It is possible that the below comparison returns an
		unexpected result. 2^32 milliseconds pass in about 50 days,
		so if the difference between ut_time_ms() and access_time
		is e.g. 50 days + 15 ms, then the below will behave as if
		it is 15 ms. This is known and fixing it would require to
		increase buf_page_t::access_time from 32 to 64 bits. */
		if (access_time > 0
		    && ((ib_uint32_t) (ut_time_ms() - access_time))
		    >= buf_LRU_old_threshold_ms) {
			return(TRUE);
		}

		buf_pool->stat.n_pages_not_made_young++;
		return(FALSE);
	} else {
		return(!buf_page_peek_if_young(bpage));
	}
}

/*********************************************************************//**
Gets the state of a block.
@return state */
UNIV_INLINE
enum buf_page_state
buf_page_get_state(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	enum buf_page_state	state = bpage->state;

#ifdef UNIV_DEBUG
	switch (state) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_FILE_PAGE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	default:
		ut_error;
	}
#endif /* UNIV_DEBUG */

	return(state);
}
/*********************************************************************//**
Gets the state of a block.
@return state */
UNIV_INLINE
enum buf_page_state
buf_block_get_state(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_state(&block->page));
}

/*********************************************************************//**
Gets the name of the state of a block.
@return name or "CORRUPTED" */
UNIV_INLINE
const char*
buf_get_state_name(
/*===============*/
	const buf_block_t*	block)	/*!< in: pointer to the control
					block */
{
	enum buf_page_state	state = buf_page_get_state(&block->page);

	switch (state) {
	case BUF_BLOCK_POOL_WATCH:
		return("BUF_BLOCK_POOL_WATCH");
	case BUF_BLOCK_ZIP_PAGE:
		return("BUF_BLOCK_ZIP_PAGE");
	case BUF_BLOCK_ZIP_DIRTY:
		return("BUF_BLOCK_ZIP_DIRTY");
	case BUF_BLOCK_NOT_USED:
		return("BUF_BLOCK_NOT_USED");
	case BUF_BLOCK_READY_FOR_USE:
		return("BUF_BLOCK_READY_FOR_USE");
	case BUF_BLOCK_FILE_PAGE:
		return("BUF_BLOCK_FILE_PAGE");
	case BUF_BLOCK_MEMORY:
		return("BUF_BLOCK_MEMORY");
	case BUF_BLOCK_REMOVE_HASH:
		return("BUF_BLOCK_REMOVE_HASH");
	default:
		return("CORRUPTED");
	}
}

/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
void
buf_page_set_state(
/*===============*/
	buf_page_t*		bpage,	/*!< in/out: pointer to control block */
	enum buf_page_state	state)	/*!< in: state */
{
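	/* The debug checks below encode the legal state transitions:
	NOT_USED -> READY_FOR_USE; READY_FOR_USE -> MEMORY, FILE_PAGE
	or NOT_USED; MEMORY -> NOT_USED; ZIP_PAGE <-> ZIP_DIRTY;
	FILE_PAGE -> NOT_USED, REMOVE_HASH or FILE_PAGE;
	REMOVE_HASH -> MEMORY. POOL_WATCH sentinels must never be
	passed to this function. */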
#ifdef UNIV_DEBUG
	enum buf_page_state	old_state = buf_page_get_state(bpage);

	switch (old_state) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		break;
	case BUF_BLOCK_ZIP_PAGE:
		ut_a(state == BUF_BLOCK_ZIP_DIRTY);
		break;
	case BUF_BLOCK_ZIP_DIRTY:
		ut_a(state == BUF_BLOCK_ZIP_PAGE);
		break;
	case BUF_BLOCK_NOT_USED:
		ut_a(state == BUF_BLOCK_READY_FOR_USE);
		break;
	case BUF_BLOCK_READY_FOR_USE:
		ut_a(state == BUF_BLOCK_MEMORY
		     || state == BUF_BLOCK_FILE_PAGE
		     || state == BUF_BLOCK_NOT_USED);
		break;
	case BUF_BLOCK_MEMORY:
		ut_a(state == BUF_BLOCK_NOT_USED);
		break;
	case BUF_BLOCK_FILE_PAGE:
		if (!(state == BUF_BLOCK_NOT_USED
		      || state == BUF_BLOCK_REMOVE_HASH
		      || state == BUF_BLOCK_FILE_PAGE)) {
			const char*	old_state_name
				= buf_get_state_name((buf_block_t*) bpage);
			bpage->state = state;

			fprintf(stderr,
				"InnoDB: Error: block old state %d (%s)"
				" new state %d (%s) not correct\n",
				old_state,
				old_state_name,
				state,
				buf_get_state_name((buf_block_t*) bpage));
			ut_a(state == BUF_BLOCK_NOT_USED
			     || state == BUF_BLOCK_REMOVE_HASH
			     || state == BUF_BLOCK_FILE_PAGE);
		}

		break;
	case BUF_BLOCK_REMOVE_HASH:
		ut_a(state == BUF_BLOCK_MEMORY);
		break;
	}
#endif /* UNIV_DEBUG */
	bpage->state = state;
}

/*********************************************************************//**
Sets the state of a block. */
UNIV_INLINE
void
buf_block_set_state(
/*================*/
	buf_block_t*		block,	/*!< in/out: pointer to control block */
	enum buf_page_state	state)	/*!< in: state */
{
	buf_page_set_state(&block->page, state);
}

/*********************************************************************//**
Determines if a block is mapped to a tablespace.
@return TRUE if mapped */
UNIV_INLINE
ibool
buf_page_in_file(
/*=============*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		break;
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_FILE_PAGE:
		return(TRUE);
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	}

	return(FALSE);
}

/*********************************************************************//**
Determines if a block should be on unzip_LRU list.
@return TRUE if block belongs to unzip_LRU */
UNIV_INLINE
ibool
buf_page_belongs_to_unzip_LRU(
/*==========================*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	ut_ad(buf_page_in_file(bpage));

	return(bpage->zip.data
	       && buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
}

/*********************************************************************//**
Gets the mutex of a block.
@return pointer to mutex protecting bpage */
UNIV_INLINE
BPageMutex*
buf_page_get_mutex(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: pointer to control block */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_POOL_WATCH:
		ut_error;
		return(NULL);
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		return(&buf_pool->zip_mutex);
	default:
		return(&((buf_block_t*) bpage)->mutex);
	}
}

/*********************************************************************//**
Get the flush type of a page.
@return flush type */
UNIV_INLINE
buf_flush_t
buf_page_get_flush_type(
/*====================*/
	const buf_page_t*	bpage)	/*!< in: buffer page */
{
	buf_flush_t	flush_type = (buf_flush_t) bpage->flush_type;

#ifdef UNIV_DEBUG
	switch (flush_type) {
	case BUF_FLUSH_LRU:
	case BUF_FLUSH_LIST:
	case BUF_FLUSH_SINGLE_PAGE:
		return(flush_type);
	case BUF_FLUSH_N_TYPES:
		ut_error;
	}
	ut_error;
#endif /* UNIV_DEBUG */
	return(flush_type);
}
/*********************************************************************//**
Set the flush type of a page. */
UNIV_INLINE
void
buf_page_set_flush_type(
/*====================*/
	buf_page_t*	bpage,		/*!< in: buffer page */
	buf_flush_t	flush_type)	/*!< in: flush type */
{
	bpage->flush_type = flush_type;
	ut_ad(buf_page_get_flush_type(bpage) == flush_type);
}

/** Map a block to a file page.
@param[in,out]	block	pointer to control block
@param[in]	page_id	page id */
UNIV_INLINE
void
buf_block_set_file_page(
	buf_block_t*		block,
	const page_id_t&	page_id)
{
	buf_block_set_state(block, BUF_BLOCK_FILE_PAGE);
	block->page.id.copy_from(page_id);
}

/*********************************************************************//**
Gets the io_fix state of a block.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_page_get_io_fix(
/*================*/
	const buf_page_t*	bpage)	/*!< in: pointer to the control block */
{
	ut_ad(bpage != NULL);

	enum buf_io_fix	io_fix = bpage->io_fix;

#ifdef UNIV_DEBUG
	switch (io_fix) {
	case BUF_IO_NONE:
	case BUF_IO_READ:
	case BUF_IO_WRITE:
	case BUF_IO_PIN:
		return(io_fix);
	}
	ut_error;
#endif /* UNIV_DEBUG */
	return(io_fix);
}

/*********************************************************************//**
Gets the io_fix state of a block.
@return io_fix state */
UNIV_INLINE
enum buf_io_fix
buf_block_get_io_fix(
/*=================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	return(buf_page_get_io_fix(&block->page));
}

/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_page_set_io_fix(
/*================*/
	buf_page_t*	bpage,	/*!< in/out: control block */
	enum buf_io_fix	io_fix)	/*!< in: io_fix state */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));

	bpage->io_fix = io_fix;
	ut_ad(buf_page_get_io_fix(bpage) == io_fix);
}

/*********************************************************************//**
Sets the io_fix state of a block. */
UNIV_INLINE
void
buf_block_set_io_fix(
/*=================*/
	buf_block_t*	block,	/*!< in/out: control block */
	enum buf_io_fix	io_fix)	/*!< in: io_fix state */
{
	buf_page_set_io_fix(&block->page, io_fix);
}

/*********************************************************************//**
Makes a block sticky. A sticky block implies that even after we release
the buf_pool->mutex and the block->mutex:
* it cannot be removed from the flush_list
* the block descriptor cannot be relocated
* it cannot be removed from the LRU list
Note that:
* the block can still change its position in the LRU list
* the next and previous pointers can change. */
UNIV_INLINE
void
buf_page_set_sticky(
/*================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_NONE);

	bpage->io_fix = BUF_IO_PIN;
}

/*********************************************************************//**
Removes stickiness of a block. */
UNIV_INLINE
void
buf_page_unset_sticky(
/*==================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_get_io_fix(bpage) == BUF_IO_PIN);

	bpage->io_fix = BUF_IO_NONE;
}

/********************************************************************//**
Determine if a buffer block can be relocated in memory. The block
can be dirty, but it must not be I/O-fixed or bufferfixed. */
UNIV_INLINE
ibool
buf_page_can_relocate(
/*==================*/
	const buf_page_t*	bpage)	/*!< control block being relocated */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
	ut_ad(buf_page_in_file(bpage));
	ut_ad(bpage->in_LRU_list);

	return(buf_page_get_io_fix(bpage) == BUF_IO_NONE
	       && bpage->buf_fix_count == 0);
}

/*********************************************************************//**
Determine if a block has been flagged old.
@return TRUE if old */
UNIV_INLINE
ibool
buf_page_is_old(
/*============*/
	const buf_page_t*	bpage)	/*!< in: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(buf_pool_mutex_own(buf_pool));
#endif /* UNIV_DEBUG */
	ut_ad(buf_page_in_file(bpage));

	return(bpage->old);
}

/*********************************************************************//**
Flag a block old. */
UNIV_INLINE
void
buf_page_set_old(
/*=============*/
	buf_page_t*	bpage,	/*!< in/out: control block */
	bool		old)	/*!< in: old */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
#endif /* UNIV_DEBUG */
	ut_a(buf_page_in_file(bpage));
	ut_ad(buf_pool_mutex_own(buf_pool));
	ut_ad(bpage->in_LRU_list);

#ifdef UNIV_LRU_DEBUG
	ut_a((buf_pool->LRU_old_len == 0) == (buf_pool->LRU_old == NULL));
	/* If a block is flagged "old", the LRU_old list must exist. */
	ut_a(!old || buf_pool->LRU_old);

	if (UT_LIST_GET_PREV(LRU, bpage) && UT_LIST_GET_NEXT(LRU, bpage)) {
		const buf_page_t*	prev = UT_LIST_GET_PREV(LRU, bpage);
		const buf_page_t*	next = UT_LIST_GET_NEXT(LRU, bpage);
		if (prev->old == next->old) {
			ut_a(prev->old == old);
		} else {
			ut_a(!prev->old);
			ut_a(buf_pool->LRU_old == (old ? bpage : next));
		}
	}
#endif /* UNIV_LRU_DEBUG */

	bpage->old = old;
}

/*********************************************************************//**
Determine the time of first access of a block in the buffer pool.
@return ut_time_ms() at the time of first access, 0 if not accessed */
UNIV_INLINE
unsigned
buf_page_is_accessed(
/*=================*/
	const buf_page_t*	bpage)	/*!< in: control block */
{
	ut_ad(buf_page_in_file(bpage));

	return(bpage->access_time);
}

/*********************************************************************//**
Flag a block accessed. */
UNIV_INLINE
void
buf_page_set_accessed(
/*==================*/
	buf_page_t*	bpage)	/*!< in/out: control block */
{
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage(bpage);
	ut_ad(!buf_pool_mutex_own(buf_pool));
	ut_ad(mutex_own(buf_page_get_mutex(bpage)));
#endif /* UNIV_DEBUG */

	ut_a(buf_page_in_file(bpage));

	if (bpage->access_time == 0) {
		/* Make this the time of the first access. */
		bpage->access_time = static_cast<uint>(ut_time_ms());
	}
}

/*********************************************************************//**
Gets the buf_block_t handle of a buffered file block if an uncompressed
page frame exists, or NULL.
@return control block, or NULL */
UNIV_INLINE
buf_block_t*
buf_page_get_block(
/*===============*/
	buf_page_t*	bpage)	/*!< in: control block, or NULL */
{
	if (bpage != NULL) {
		ut_ad(buf_page_in_file(bpage));

		if (buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE) {
			return((buf_block_t*) bpage);
		}
	}

	return(NULL);
}

#ifdef UNIV_DEBUG
/*********************************************************************//**
Gets a pointer to the memory frame of a block.
@return pointer to the frame */
UNIV_INLINE
buf_frame_t*
buf_block_get_frame(
/*================*/
	const buf_block_t*	block)	/*!< in: pointer to the control block */
{
	if (!block) {
		return NULL;
	}

	switch (buf_block_get_state(block)) {
	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
	case BUF_BLOCK_NOT_USED:
		ut_error;
		break;
	case BUF_BLOCK_FILE_PAGE:
		ut_a(block->page.buf_fix_count > 0);
		/* fall through */
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		goto ok;
	}
	ut_error;
ok:
	return((buf_frame_t*) block->frame);
}
#endif /* UNIV_DEBUG */

/***********************************************************************
FIXME_FTS Gets the frame the pointer is pointing to. */
UNIV_INLINE
buf_frame_t*
buf_frame_align(
/*============*/
			/* out: pointer to frame */
	byte*	ptr)	/* in: pointer to a frame */
{
	buf_frame_t*	frame;

	ut_ad(ptr);

	frame = (buf_frame_t*) ut_align_down(ptr, srv_page_size);

	return(frame);
}

/**********************************************************************//**
Gets the space id, page offset, and byte offset within page of a
pointer pointing to a buffer frame containing a file page. */
UNIV_INLINE
void
buf_ptr_get_fsp_addr(
/*=================*/
	const void*	ptr,	/*!< in: pointer to a buffer frame */
	ulint*		space,	/*!< out: space id */
	fil_addr_t*	addr)	/*!< out: page offset and byte offset */
{
	const page_t*	page = (const page_t*) ut_align_down(ptr,
							     srv_page_size);

	*space = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
	addr->page = mach_read_from_4(page + FIL_PAGE_OFFSET);
	addr->boffset = ut_align_offset(ptr, srv_page_size);
}

/**********************************************************************//**
Gets the hash value of the page the pointer is pointing to. This can be used
in searches in the lock hash table.
@return lock hash value */
UNIV_INLINE
unsigned
buf_block_get_lock_hash_val(
/*========================*/
	const buf_block_t*	block)	/*!< in: block */
{
	ut_ad(block);
	ut_ad(buf_page_in_file(&block->page));
	ut_ad(rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_X)
	      || rw_lock_own(&(((buf_block_t*) block)->lock), RW_LOCK_S));

	return(block->lock_hash_val);
}

/********************************************************************//**
Allocates a buf_page_t descriptor. This function must succeed. In case
of failure we assert in this function.
@return the allocated descriptor. */
UNIV_INLINE
buf_page_t*
buf_page_alloc_descriptor(void)
/*===========================*/
{
	buf_page_t*	bpage;

	bpage = (buf_page_t*) ut_zalloc_nokey(sizeof *bpage);
	ut_ad(bpage);
	UNIV_MEM_ALLOC(bpage, sizeof *bpage);

	return(bpage);
}

/********************************************************************//**
Free a buf_page_t descriptor. */
UNIV_INLINE
void
buf_page_free_descriptor(
/*=====================*/
	buf_page_t*	bpage)	/*!< in: bpage descriptor to free. */
{
	ut_free(bpage);
}

/********************************************************************//**
Frees a buffer block which does not contain a file page. */
UNIV_INLINE
void
buf_block_free(
/*===========*/
	buf_block_t*	block)	/*!< in, own: block to be freed */
{
	buf_pool_t*	buf_pool = buf_pool_from_bpage((buf_page_t*) block);

	buf_pool_mutex_enter(buf_pool);

	buf_page_mutex_enter(block);

	ut_a(buf_block_get_state(block) != BUF_BLOCK_FILE_PAGE);

	buf_LRU_block_free_non_file_page(block);

	buf_page_mutex_exit(block);

	buf_pool_mutex_exit(buf_pool);
}

/*********************************************************************//**
Copies contents of a buffer frame to a given buffer.
@return buf */
UNIV_INLINE
byte*
buf_frame_copy(
/*===========*/
	byte*			buf,	/*!< in: buffer to copy to */
	const buf_frame_t*	frame)	/*!< in: buffer frame */
{
	ut_ad(buf && frame);

	ut_memcpy(buf, frame, srv_page_size);

	return(buf);
}

/********************************************************************//**
Gets the youngest modification log sequence number for a frame.
Returns zero if it is not a file page or no modification has occurred yet.
@return newest modification to page */
UNIV_INLINE
lsn_t
buf_page_get_newest_modification(
/*=============================*/
	const buf_page_t*	bpage)	/*!< in: block containing the
					page frame */
{
	lsn_t		lsn;
	BPageMutex*	block_mutex = buf_page_get_mutex(bpage);

	mutex_enter(block_mutex);

	if (buf_page_in_file(bpage)) {
		lsn = bpage->newest_modification;
	} else {
		lsn = 0;
	}

	mutex_exit(block_mutex);

	return(lsn);
}

/********************************************************************//**
Increments the modify clock of a frame by 1. The caller must either
(1) own the buf_pool mutex and have the block bufferfix count be zero,
or (2) own an x-lock on the block. */
UNIV_INLINE
void
buf_block_modify_clock_inc(
/*=======================*/
	buf_block_t*	block)	/*!< in: block */
{
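	/* The modify clock lets readers detect that a block may have
	changed since they last looked at it: code that remembers a
	(block, modify_clock) pair, e.g. for optimistic restoration of a
	cursor position, can later compare the saved value and fall back
	to a pessimistic path if it differs. */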
#ifdef UNIV_DEBUG
	buf_pool_t*	buf_pool = buf_pool_from_bpage((buf_page_t*) block);

	/* No latch is acquired for the shared temporary tablespace. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		ut_ad((buf_pool_mutex_own(buf_pool)
		       && (block->page.buf_fix_count == 0))
		      || rw_lock_own_flagged(&block->lock,
					     RW_LOCK_FLAG_X | RW_LOCK_FLAG_SX));
	}
#endif /* UNIV_DEBUG */
	assert_block_ahi_valid(block);

	block->modify_clock++;
}

/********************************************************************//**
Returns the value of the modify clock. The caller must have an s-lock
or x-lock on the block.
@return value */
UNIV_INLINE
ib_uint64_t
buf_block_get_modify_clock(
/*=======================*/
	buf_block_t*	block)	/*!< in: block */
{
#ifdef UNIV_DEBUG
	/* No latch is acquired for the shared temporary tablespace. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		ut_ad(rw_lock_own(&(block->lock), RW_LOCK_S)
		      || rw_lock_own(&(block->lock), RW_LOCK_X)
		      || rw_lock_own(&(block->lock), RW_LOCK_SX));
	}
#endif /* UNIV_DEBUG */

	return(block->modify_clock);
}

/** Increments the bufferfix count.
@param[in,out]	bpage	block to bufferfix
@return the count */
UNIV_INLINE
ulint
buf_block_fix(
	buf_page_t*	bpage)
{
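	/* my_atomic_add32() returns the value of buf_fix_count before the
	addition, so adding 1 yields the new (post-increment) fix count
	that is returned here. */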
	return uint32(my_atomic_add32((int32*) &bpage->buf_fix_count, 1) + 1);
}

/** Increments the bufferfix count.
@param[in,out]	block	block to bufferfix
@return the count */
UNIV_INLINE
ulint
buf_block_fix(
	buf_block_t*	block)
{
	return(buf_block_fix(&block->page));
}

/*******************************************************************//**
Increments the bufferfix count. */
UNIV_INLINE
void
buf_block_buf_fix_inc_func(
/*=======================*/
#ifdef UNIV_DEBUG
	const char*	file,	/*!< in: file name */
	unsigned	line,	/*!< in: line */
#endif /* UNIV_DEBUG */
	buf_block_t*	block)	/*!< in/out: block to bufferfix */
{
#ifdef UNIV_DEBUG
	/* No debug latch is acquired if the block belongs to the system
	temporary tablespace. The debug latch is not of much help if
	access to the block is single threaded. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		ibool	ret;
		ret = rw_lock_s_lock_nowait(&block->debug_latch, file, line);
		ut_a(ret);
	}
#endif /* UNIV_DEBUG */

	buf_block_fix(block);
}

/** Decrements the bufferfix count.
@param[in,out]	bpage	block to bufferunfix
@return the remaining buffer-fix count */
UNIV_INLINE
ulint
buf_block_unfix(
	buf_page_t*	bpage)
{
	uint32	count = uint32(my_atomic_add32((int32*) &bpage->buf_fix_count,
					       -1));
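	/* Here count is the bufferfix count before the decrement, so the
	assertion below catches an unfix without a matching fix (an
	underflow of buf_fix_count). */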
	ut_ad(count != 0);
	return count - 1;
}

/** Decrements the bufferfix count.
@param[in,out]	block	block to bufferunfix
@return the remaining buffer-fix count */
UNIV_INLINE
ulint
buf_block_unfix(
	buf_block_t*	block)
{
	return(buf_block_unfix(&block->page));
}

/*******************************************************************//**
Decrements the bufferfix count. */
UNIV_INLINE
void
buf_block_buf_fix_dec(
/*==================*/
	buf_block_t*	block)	/*!< in/out: block to bufferunfix */
{
	buf_block_unfix(block);

#ifdef UNIV_DEBUG
	/* No debug latch is acquired if the block belongs to the system
	temporary tablespace. The debug latch is not of much help if
	access to the block is single threaded. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		rw_lock_s_unlock(&block->debug_latch);
	}
#endif /* UNIV_DEBUG */
}

/** Returns the buffer pool instance given a page id.
@param[in]	page_id	page id
@return buffer pool */
UNIV_INLINE
buf_pool_t*
buf_pool_get(
	const page_id_t&	page_id)
{
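	/* Pages are distributed over the buffer pool instances by a hash
	of the page id with the low 6 bits of the page number ignored, so
	that all 64 pages of one read-ahead area (BUF_READ_AHEAD_AREA) map
	to the same instance. */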
	/* 2log of BUF_READ_AHEAD_AREA (64) */
	ulint		ignored_page_no = page_id.page_no() >> 6;

	page_id_t	id(page_id.space(), ignored_page_no);

	ulint		i = id.fold() % srv_buf_pool_instances;

	return(&buf_pool_ptr[i]);
}

/******************************************************************//**
Returns the buffer pool instance given its array index
@return buffer pool */
UNIV_INLINE
buf_pool_t*
buf_pool_from_array(
/*================*/
	ulint	index)		/*!< in: array index to get
				buffer pool instance from */
{
	ut_ad(index < MAX_BUFFER_POOLS);
	ut_ad(index < srv_buf_pool_instances);
	return(&buf_pool_ptr[index]);
}

/** Returns the control block of a file page, NULL if not found.
@param[in]	buf_pool	buffer pool instance
@param[in]	page_id		page id
@return block, NULL if not found */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_low(
	buf_pool_t*		buf_pool,
	const page_id_t&	page_id)
{
	buf_page_t*	bpage;

#ifdef UNIV_DEBUG
	rw_lock_t*	hash_lock;

	hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());
	ut_ad(rw_lock_own(hash_lock, RW_LOCK_X)
	      || rw_lock_own(hash_lock, RW_LOCK_S));
#endif /* UNIV_DEBUG */

	/* Look for the page in the hash table */

	HASH_SEARCH(hash, buf_pool->page_hash, page_id.fold(), buf_page_t*,
		    bpage,
		    ut_ad(bpage->in_page_hash && !bpage->in_zip_hash
			  && buf_page_in_file(bpage)),
		    page_id.equals_to(bpage->id));
	if (bpage) {
		ut_a(buf_page_in_file(bpage));
		ut_ad(bpage->in_page_hash);
		ut_ad(!bpage->in_zip_hash);
		ut_ad(buf_pool_from_bpage(bpage) == buf_pool);
	}

	return(bpage);
}

/** Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
page_hash lock is acquired in the specified lock mode. Otherwise,
mode value is ignored. It is up to the caller to release the
lock. If the block is found and the lock is NULL then the page_hash
lock is released by this function.
@param[in]	buf_pool	buffer pool instance
@param[in]	page_id		page id
@param[in,out]	lock		lock of the page hash acquired if bpage is
found, NULL otherwise. If NULL is passed then the hash_lock is released by
this function.
@param[in]	lock_mode	RW_LOCK_X or RW_LOCK_S. Ignored if
lock == NULL
@param[in]	watch		if true, return watch sentinel also.
@return pointer to the bpage or NULL; if NULL, lock is also NULL or
a watch sentinel. */
UNIV_INLINE
buf_page_t*
buf_page_hash_get_locked(
	buf_pool_t*		buf_pool,
	const page_id_t&	page_id,
	rw_lock_t**		lock,
	ulint			lock_mode,
	bool			watch)
{
	buf_page_t*	bpage = NULL;
	rw_lock_t*	hash_lock;
	ulint		mode = RW_LOCK_S;

	if (lock != NULL) {
		*lock = NULL;
		ut_ad(lock_mode == RW_LOCK_X
		      || lock_mode == RW_LOCK_S);
		mode = lock_mode;
	}

	hash_lock = hash_get_lock(buf_pool->page_hash, page_id.fold());

	ut_ad(!rw_lock_own(hash_lock, RW_LOCK_X)
	      && !rw_lock_own(hash_lock, RW_LOCK_S));

	if (mode == RW_LOCK_S) {
		rw_lock_s_lock(hash_lock);

		/* If not own buf_pool_mutex, page_hash can be changed. */
		hash_lock = hash_lock_s_confirm(
			hash_lock, buf_pool->page_hash, page_id.fold());
	} else {
		rw_lock_x_lock(hash_lock);
		/* If not own buf_pool_mutex, page_hash can be changed. */
		hash_lock = hash_lock_x_confirm(
			hash_lock, buf_pool->page_hash, page_id.fold());
	}

	bpage = buf_page_hash_get_low(buf_pool, page_id);

	if (!bpage || buf_pool_watch_is_sentinel(buf_pool, bpage)) {
		if (!watch) {
			bpage = NULL;
		}
		goto unlock_and_exit;
	}

	ut_ad(buf_page_in_file(bpage));
	ut_ad(page_id.equals_to(bpage->id));

	if (lock == NULL) {
		/* The caller wants us to release the page_hash lock */
		goto unlock_and_exit;
	} else {
		/* To be released by the caller */
		*lock = hash_lock;
		goto exit;
	}

unlock_and_exit:
	if (mode == RW_LOCK_S) {
		rw_lock_s_unlock(hash_lock);
	} else {
		rw_lock_x_unlock(hash_lock);
	}
exit:
	return(bpage);
}

/** Returns the control block of a file page, NULL if not found.
If the block is found and lock is not NULL then the appropriate
page_hash lock is acquired in the specified lock mode. Otherwise,
mode value is ignored. It is up to the caller to release the
lock. If the block is found and the lock is NULL then the page_hash
lock is released by this function.
@param[in]	buf_pool	buffer pool instance
@param[in]	page_id		page id
@param[in,out]	lock		lock of the page hash acquired if bpage is
found, NULL otherwise. If NULL is passed then the hash_lock is released by
this function.
@param[in]	lock_mode	RW_LOCK_X or RW_LOCK_S. Ignored if
lock == NULL
@return pointer to the block or NULL; if NULL, lock is also NULL. */
UNIV_INLINE
buf_block_t*
buf_block_hash_get_locked(
	buf_pool_t*		buf_pool,
	const page_id_t&	page_id,
	rw_lock_t**		lock,
	ulint			lock_mode)
{
	buf_page_t*	bpage = buf_page_hash_get_locked(buf_pool,
							 page_id,
							 lock,
							 lock_mode);
	buf_block_t*	block = buf_page_get_block(bpage);

	if (block != NULL) {

		ut_ad(buf_block_get_state(block) == BUF_BLOCK_FILE_PAGE);
		ut_ad(!lock || rw_lock_own(*lock, lock_mode));

		return(block);
	} else if (bpage) {
		/* It is not a block. Just a bpage */
		ut_ad(buf_page_in_file(bpage));

		if (lock) {
			if (lock_mode == RW_LOCK_S) {
				rw_lock_s_unlock(*lock);
			} else {
				rw_lock_x_unlock(*lock);
			}
			*lock = NULL;
		}
		return(NULL);
	}

	ut_ad(!bpage);
	ut_ad(lock == NULL || *lock == NULL);
	return(NULL);
}

/** Returns TRUE if the page can be found in the buffer pool hash table.
NOTE that it is possible that the page is not yet read from disk,
though.
@param[in]	page_id	page id
@return TRUE if found in the page hash table */
UNIV_INLINE
ibool
buf_page_peek(
	const page_id_t&	page_id)
{
	buf_pool_t*	buf_pool = buf_pool_get(page_id);

	return(buf_page_hash_get(buf_pool, page_id) != NULL);
}

/********************************************************************//**
Releases a compressed-only page acquired with buf_page_get_zip(). */
UNIV_INLINE
void
buf_page_release_zip(
/*=================*/
	buf_page_t*	bpage)	/*!< in: buffer block */
{
	ut_ad(bpage);
	ut_a(bpage->buf_fix_count > 0);

	switch (buf_page_get_state(bpage)) {
	case BUF_BLOCK_FILE_PAGE:
#ifdef UNIV_DEBUG
	{
		/* No debug latch is acquired if the block belongs to the
		system temporary tablespace. The debug latch is not of much
		help if access to the block is single threaded. */
		buf_block_t*	block = reinterpret_cast<buf_block_t*>(bpage);
		if (!fsp_is_system_temporary(block->page.id.space())) {
			rw_lock_s_unlock(&block->debug_latch);
		}
	}
#endif /* UNIV_DEBUG */
		/* Fall through */
	case BUF_BLOCK_ZIP_PAGE:
	case BUF_BLOCK_ZIP_DIRTY:
		buf_block_unfix(reinterpret_cast<buf_block_t*>(bpage));
		return;

	case BUF_BLOCK_POOL_WATCH:
	case BUF_BLOCK_NOT_USED:
	case BUF_BLOCK_READY_FOR_USE:
	case BUF_BLOCK_MEMORY:
	case BUF_BLOCK_REMOVE_HASH:
		break;
	}

	ut_error;
}

/********************************************************************//**
Releases a latch, if specified. */
UNIV_INLINE
void
buf_page_release_latch(
/*===================*/
	buf_block_t*	block,		/*!< in: buffer block */
	ulint		rw_latch)	/*!< in: RW_S_LATCH, RW_SX_LATCH,
					RW_X_LATCH, RW_NO_LATCH */
{
#ifdef UNIV_DEBUG
	/* No debug latch is acquired if the block belongs to the system
	temporary tablespace. The debug latch is not of much help if
	access to the block is single threaded. */
	if (!fsp_is_system_temporary(block->page.id.space())) {
		rw_lock_s_unlock(&block->debug_latch);
	}
#endif /* UNIV_DEBUG */

	if (rw_latch == RW_S_LATCH) {
		rw_lock_s_unlock(&block->lock);
	} else if (rw_latch == RW_SX_LATCH) {
		rw_lock_sx_unlock(&block->lock);
	} else if (rw_latch == RW_X_LATCH) {
		rw_lock_x_unlock(&block->lock);
	}
}

#ifdef UNIV_DEBUG
/*********************************************************************//**
Adds latch level info for the rw-lock protecting the buffer frame. This
should be called in the debug version after a successful latching of a
page if we know the latching order level of the acquired latch. */
UNIV_INLINE
void
buf_block_dbg_add_level(
/*====================*/
	buf_block_t*	block,	/*!< in: buffer page
				where we have acquired latch */
	latch_level_t	level)	/*!< in: latching order level */
{
	sync_check_lock(&block->lock, level);
}

#endif /* UNIV_DEBUG */
/********************************************************************//**
Acquire mutex on all buffer pool instances. */
UNIV_INLINE
void
buf_pool_mutex_enter_all(void)
/*==========================*/
{
	for (ulint i = 0; i < srv_buf_pool_instances; ++i) {
		buf_pool_t*	buf_pool = buf_pool_from_array(i);

		buf_pool_mutex_enter(buf_pool);
	}
}

/********************************************************************//**
Release mutex on all buffer pool instances. */
UNIV_INLINE
void
buf_pool_mutex_exit_all(void)
/*=========================*/
{
	ulint	i;

	for (i = 0; i < srv_buf_pool_instances; i++) {
		buf_pool_t*	buf_pool;

		buf_pool = buf_pool_from_array(i);
		buf_pool_mutex_exit(buf_pool);
	}
}
/*********************************************************************//**
Get the nth chunk's buffer block in the specified buffer pool.
@return the nth chunk's buffer block. */
UNIV_INLINE
buf_block_t*
buf_get_nth_chunk_block(
/*====================*/
	const buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
	ulint			n,		/*!< in: nth chunk in the buffer pool */
	ulint*			chunk_size)	/*!< out: chunk size */
{
	const buf_chunk_t*	chunk;

	chunk = buf_pool->chunks + n;
	*chunk_size = chunk->size;
	return(chunk->blocks);
}

/********************************************************************//**
Get buf frame. */
UNIV_INLINE
void*
buf_page_get_frame(
/*===============*/
	const buf_page_t*	bpage)	/*!< in: buffer pool page */
{
	/* With encryption or compression, the buffer pool page may
	contain an extra buffer where the result is stored. */
	if (bpage->slot && bpage->slot->out_buf) {
		return bpage->slot->out_buf;
	} else if (bpage->zip.data) {
		return bpage->zip.data;
	} else {
		return ((buf_block_t*) bpage)->frame;
	}
}

/** Verify the possibility that a stored page is not in buffer pool.
@param[in]	withdraw_clock	withdraw clock when stored the page
@retval true if the page might be relocated */
UNIV_INLINE
bool
buf_pool_is_obsolete(
	ulint	withdraw_clock)
{
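	/* Rough idea: buf_withdraw_clock is advanced when blocks are
	withdrawn during buffer pool resizing. If it no longer matches the
	value recorded when the block pointer was stored, or if withdrawing
	is still in progress, the stored pointer may have been relocated
	and must not be trusted. */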
	return(UNIV_UNLIKELY(buf_pool_withdrawing
			     || buf_withdraw_clock != withdraw_clock));
}

/** Calculate aligned buffer pool size based on srv_buf_pool_chunk_unit,
if needed.
@param[in]	size	size in bytes
@return aligned size */
UNIV_INLINE
ulint
buf_pool_size_align(
	ulint	size)
{
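	/* Illustrative example (hypothetical values): with 8 buffer pool
	instances and a chunk unit of 128 MiB, m is 1 GiB, so a requested
	size of 2.5 GiB is rounded up to 3 GiB. Sizes below
	srv_buf_pool_min_size are first raised to that minimum. */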
	const ib_uint64_t	m = ((ib_uint64_t) srv_buf_pool_instances)
		* srv_buf_pool_chunk_unit;
	size = ut_max(size, srv_buf_pool_min_size);

	if (size % m == 0) {
		return(size);
	} else {
		return (ulint)((size / m + 1) * m);
	}
}