/*****************************************************************************

Copyright (c) 2013, 2014, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/******************************************************************//**
@file include/ut0pool.h
Object pool.

Created 2012-Feb-26 Sunny Bains
***********************************************************************/

#ifndef ut0pool_h
#define ut0pool_h

#include <vector>
#include <queue>
#include <functional>

#include "ut0new.h"

/** Allocate the memory for the object in blocks. We keep the objects sorted
on pointer so that they are closer together in case they have to be iterated
over in a list. */
template <typename Type, typename Factory, typename LockStrategy>
struct Pool {

	typedef Type value_type;

	// FIXME: Add an assertion to check that the alignment and offset
	// are what we expect. Also, sizeof(void*) can be 8; can we improve
	// on this?
	struct Element {
		Pool*		m_pool;
		value_type	m_type;
	};
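	/* Note: mem_free() below recovers the enclosing Element from a
	value_type pointer by pointer arithmetic alone. That only works if
	m_type is the last member and the Element ends immediately after it
	(no tail padding), which is what the FIXME above asks us to assert.
	A minimal sketch of such an assertion, assuming a C++11
	static_assert is acceptable here (not part of the original code):

		// static_assert(
		//	sizeof(Element) == sizeof(Pool*) + sizeof(value_type),
		//	"Element layout assumed by Pool::mem_free()");
	*/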

	/** Constructor
	@param size	size of the memory block */
	Pool(size_t size)
		:
		m_end(),
		m_start(),
		m_size(size),
		m_last()
	{
		ut_a(size >= sizeof(Element));

		m_lock_strategy.create();

		ut_a(m_start == 0);

		m_start = reinterpret_cast<Element*>(ut_zalloc_nokey(m_size));

		m_last = m_start;

		m_end = &m_start[m_size / sizeof(*m_start)];

		/* Note: Initialise only a small subset, even though we have
		allocated all the memory. This is required only because PFS
		(MTR) results change if we instantiate too many mutexes up
		front. */

		init(ut_min(size_t(16), size_t(m_end - m_start)));

		ut_ad(m_pqueue.size() <= size_t(m_last - m_start));
	}

	/** Destructor */
	~Pool()
	{
		m_lock_strategy.destroy();

		for (Element* elem = m_start; elem != m_last; ++elem) {

			ut_ad(elem->m_pool == this);
			/* Unpoison the memory for AddressSanitizer */
			MEM_UNDEFINED(&elem->m_type, sizeof elem->m_type);
			/* Declare the contents as initialized for Valgrind;
			we checked this in mem_free(). */
			UNIV_MEM_VALID(&elem->m_type, sizeof elem->m_type);
			Factory::destroy(&elem->m_type);
		}

		ut_free(m_start);
		m_end = m_last = m_start = 0;
		m_size = 0;
	}

	/** Get an object from the pool.
	@return a free instance or NULL if exhausted. */
	Type* get()
	{
		Element*	elem;

		m_lock_strategy.enter();

		if (!m_pqueue.empty()) {

			elem = m_pqueue.top();
			m_pqueue.pop();

		} else if (m_last < m_end) {

			/* Initialise the remaining elements. */
			init(size_t(m_end - m_last));

			ut_ad(!m_pqueue.empty());

			elem = m_pqueue.top();
			m_pqueue.pop();
		} else {
			elem = NULL;
		}

		m_lock_strategy.exit();

		if (elem) {
			/* Unpoison the memory for AddressSanitizer */
			MEM_UNDEFINED(&elem->m_type, sizeof elem->m_type);
			/* Declare the memory initialized for Valgrind.
			The trx_t that are released to the pool are
			actually initialized; we checked that by
			UNIV_MEM_ASSERT_RW() in mem_free() below. */
			UNIV_MEM_VALID(&elem->m_type, sizeof elem->m_type);
			return &elem->m_type;
		}

		return NULL;
	}

	/** Return the object to the pool.
	@param ptr	object to release */
	static void mem_free(value_type* ptr)
	{
		Element*	elem;
		byte*		p = reinterpret_cast<byte*>(ptr + 1);

		/* Recover the enclosing Element: m_type is the last member,
		so the Element ends where the object ends. */
		elem = reinterpret_cast<Element*>(p - sizeof(*elem));
		UNIV_MEM_ASSERT_RW(&elem->m_type, sizeof elem->m_type);

		elem->m_pool->put(elem);
		MEM_NOACCESS(&elem->m_type, sizeof elem->m_type);
	}

protected:
	// Disable copying
	Pool(const Pool&);
	Pool& operator=(const Pool&);

private:

	/* We only need to compare on pointer address. */
	typedef std::priority_queue<
		Element*,
		std::vector<Element*, ut_allocator<Element*> >,
		std::greater<Element*> >	pqueue_t;

	/** Release the object to the free pool
	@param elem	element to free */
	void put(Element* elem)
	{
		m_lock_strategy.enter();

		ut_ad(elem >= m_start && elem < m_last);

		ut_ad(Factory::debug(&elem->m_type));

		m_pqueue.push(elem);

		m_lock_strategy.exit();
	}

	/** Initialise the elements.
	@param n_elems	Number of elements to initialise */
	void init(size_t n_elems)
	{
		ut_ad(size_t(m_end - m_last) >= n_elems);

		for (size_t i = 0; i < n_elems; ++i, ++m_last) {

			m_last->m_pool = this;
			Factory::init(&m_last->m_type);
			m_pqueue.push(m_last);
		}

		ut_ad(m_last <= m_end);
	}

private:
	/** Pointer to the end of the block (one past the last element) */
	Element*		m_end;

	/** Pointer to the first element */
	Element*		m_start;

	/** Size of the block in bytes */
	size_t			m_size;

	/** Upper limit of used space */
	Element*		m_last;

	/** Priority queue ordered on the pointer address. */
	pqueue_t		m_pqueue;

	/** Lock strategy to use */
	LockStrategy		m_lock_strategy;
};
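
/* The Pool template takes its Factory and LockStrategy policies as template
parameters and never names their interfaces explicitly; the calls made above
(Factory::init(), Factory::destroy(), Factory::debug(), and the lock
strategy's create()/destroy()/enter()/exit()) imply the contract sketched
below. This is only an illustration with made-up names (SamplePoolFactory,
SamplePoolLock), not types defined anywhere in the source tree:

	struct SamplePoolFactory {
		// Construct an object in the raw slot handed out by Pool.
		static void	init(Type* obj);

		// Tear the object down before the Pool frees its memory.
		static void	destroy(Type* obj);

		// Debug-only sanity check, used in ut_ad() assertions.
		static bool	debug(const Type* obj);
	};

	struct SamplePoolLock {
		void	create();	// initialise the latch
		void	destroy();	// free the latch
		void	enter();	// acquire
		void	exit();		// release
	};
*/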

template <typename Pool, typename LockStrategy>
struct PoolManager {

	typedef Pool PoolType;
	typedef typename PoolType::value_type value_type;

	PoolManager(size_t size)
		:
		m_size(size)
	{
		create();
	}

	~PoolManager()
	{
		destroy();

		ut_a(m_pools.empty());
	}

	/** Get an element from one of the pools.
	@return instance or NULL if pool is empty. */
	value_type* get()
	{
		size_t		index = 0;
		size_t		delay = 1;
		value_type*	ptr = NULL;

		do {
			m_lock_strategy.enter();

			ut_ad(!m_pools.empty());

			size_t	n_pools = m_pools.size();

			PoolType*	pool = m_pools[index % n_pools];

			m_lock_strategy.exit();

			ptr = pool->get();

			if (ptr == 0 && (index / n_pools) > 2) {

				if (!add_pool(n_pools)) {

					ib::error() << "Failed to allocate"
						" memory for a pool of size "
						<< m_size << " bytes. Will"
						" wait for " << delay
						<< " seconds for a thread to"
						" free a resource";

					/* There is not much we can do
					except crash and burn, however,
					let's be a little optimistic and
					wait for a resource to be freed. */
					os_thread_sleep(delay * 1000000);

					if (delay < 32) {
						delay <<= 1;
					}

				} else {
					delay = 1;
				}
			}

			++index;

		} while (ptr == NULL);

		return(ptr);
	}

	static void mem_free(value_type* ptr)
	{
		PoolType::mem_free(ptr);
	}

private:
	/** Add a new pool
	@param n_pools	Number of pools that existed when add_pool()
	was called.
	@return true on success */
	bool add_pool(size_t n_pools)
	{
		bool	added = false;

		m_lock_strategy.enter();

		if (n_pools < m_pools.size()) {
			/* Some other thread already added a pool. */
			added = true;
		} else {
			PoolType*	pool;

			ut_ad(n_pools == m_pools.size());

			pool = UT_NEW_NOKEY(PoolType(m_size));

			if (pool != NULL) {

				ut_ad(n_pools <= m_pools.size());

				m_pools.push_back(pool);

				ib::info() << "Number of pools: "
					<< m_pools.size();

				added = true;
			}
		}

		ut_ad(n_pools < m_pools.size() || !added);

		m_lock_strategy.exit();

		return(added);
	}

	/** Create the pool manager. */
	void create()
	{
		ut_a(m_size > sizeof(value_type));
		m_lock_strategy.create();

		add_pool(0);
	}

	/** Release the resources. */
	void destroy()
	{
		typename Pools::iterator it;
		typename Pools::iterator end = m_pools.end();

		for (it = m_pools.begin(); it != end; ++it) {
			PoolType*	pool = *it;

			UT_DELETE(pool);
		}

		m_pools.clear();

		m_lock_strategy.destroy();
	}
private:
	// Disable copying
	PoolManager(const PoolManager&);
	PoolManager& operator=(const PoolManager&);

	typedef std::vector<PoolType*, ut_allocator<PoolType*> > Pools;

	/** Size of each block */
	size_t		m_size;

	/** Pools managed by this manager */
	Pools		m_pools;

	/** Lock strategy to use */
	LockStrategy		m_lock_strategy;
};
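
/* Typical usage couples a Pool instantiation with a PoolManager over it. The
trx_t comment in Pool::get() above hints at the intended client; the policy
names below (SamplePoolFactory, SamplePoolLock, SampleManagerLock) are only
placeholders for whatever the caller defines, and the 4 MB block size is just
an example:

	typedef Pool<trx_t, SamplePoolFactory, SamplePoolLock>
		sample_pool_t;
	typedef PoolManager<sample_pool_t, SampleManagerLock>
		sample_pools_t;

	// One manager, created at startup; each get()/mem_free() pair
	// checks an object out of, and back into, one of its pools.
	sample_pools_t	pools(4 * 1024 * 1024);

	trx_t*	trx = pools.get();
	// ... use trx ...
	sample_pools_t::mem_free(trx);
*/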

#endif /* ut0pool_h */