1/* Copyright JS Foundation and other contributors, http://js.foundation
2 *
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16/**
17 * Heap implementation
18 */
19
20#include "ecma-gc.h"
21#include "jcontext.h"
22#include "jmem.h"
23#include "jrt-bit-fields.h"
24#include "jrt-libc-includes.h"
25
26#define JMEM_ALLOCATOR_INTERNAL
27#include "jmem-allocator-internal.h"
28
29/** \addtogroup mem Memory allocation
30 * @{
31 *
32 * \addtogroup heap Heap
33 * @{
34 */
35
36#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
37/**
38 * End of list marker.
39 */
40#define JMEM_HEAP_END_OF_LIST ((uint32_t) 0xffffffff)
41
42/**
43 * @{
44 */
45#ifdef ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY
46/* In this case we simply store the pointer, since it fits anyway. */
47#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
48#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (u))
49#else /* !ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
50#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - JERRY_HEAP_CONTEXT (area)))
51#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) (JERRY_HEAP_CONTEXT (area) + (u)))
52#endif /* ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
53/**
54 * @}
55 */
56
57/**
58 * Get end of region
59 *
60 * @return pointer to the end of the region
61 */
62static inline jmem_heap_free_t * JERRY_ATTR_ALWAYS_INLINE JERRY_ATTR_PURE
63jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */
64{
65 return (jmem_heap_free_t *) ((uint8_t *) curr_p + curr_p->size);
66} /* jmem_heap_get_region_end */
67#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
68
/**
 * Startup initialization of heap
 *
 * Note:
 *      With the system allocator enabled there is no private heap area,
 *      so only the (optional) statistics are initialized.
 */
void
jmem_heap_init (void)
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
#if !ENABLED (JERRY_CPOINTER_32_BIT)
  /* the maximum heap size for 16bit compressed pointers should be 512K */
  JERRY_ASSERT (((UINT16_MAX + 1) << JMEM_ALIGNMENT_LOG) >= JMEM_HEAP_SIZE);
#endif /* !ENABLED (JERRY_CPOINTER_32_BIT) */
  /* Free-list offsets are computed relative to the heap base, so it must be aligned. */
  JERRY_ASSERT ((uintptr_t) JERRY_HEAP_CONTEXT (area) % JMEM_ALIGNMENT == 0);

  /* Start with the smallest GC trigger limit; it is raised as allocations accumulate. */
  JERRY_CONTEXT (jmem_heap_limit) = CONFIG_GC_LIMIT;

  /* Initially the entire heap area forms a single free region. */
  jmem_heap_free_t *const region_p = (jmem_heap_free_t *) JERRY_HEAP_CONTEXT (area);

  region_p->size = JMEM_HEAP_AREA_SIZE;
  region_p->next_offset = JMEM_HEAP_END_OF_LIST;

  /* 'first' is a zero-sized sentinel that heads the free-region list. */
  JERRY_HEAP_CONTEXT (first).size = 0;
  JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);

  JERRY_CONTEXT (jmem_heap_list_skip_p) = &JERRY_HEAP_CONTEXT (first);

  /* Under Valgrind, free-list metadata is marked inaccessible to ordinary code. */
  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (JERRY_HEAP_CONTEXT (area), JMEM_HEAP_AREA_SIZE);

#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */
100
/**
 * Finalize heap
 *
 * Note:
 *      Every allocated block must have been freed before this is called
 *      (asserted via the allocated-size counter).
 */
void
jmem_heap_finalize (void)
{
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) == 0);
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* Under Valgrind, mark the whole heap (sentinel + area) inaccessible again. */
  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), JMEM_HEAP_SIZE);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_finalize */
112
/**
 * Allocation of memory region.
 *
 * Note:
 *      This is the raw allocator: it never triggers garbage collection.
 *      See 'jmem_heap_gc_and_alloc_block' for the reclaiming wrapper.
 *
 * See also:
 *          jmem_heap_alloc_block
 *
 * @return pointer to allocated memory block - if allocation is successful,
 *         NULL - if there is not enough memory.
 */
static void * JERRY_ATTR_HOT
jmem_heap_alloc (const size_t size) /**< size of requested block */
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* Align size. */
  const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
  jmem_heap_free_t *data_space_p = NULL;

  JMEM_VALGRIND_DEFINED_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  /* Fast path for 8 byte chunks, first region is guaranteed to be sufficient. */
  if (required_size == JMEM_ALIGNMENT
      && JERRY_LIKELY (JERRY_HEAP_CONTEXT (first).next_offset != JMEM_HEAP_END_OF_LIST))
  {
    /* Any free region can satisfy a single-alignment-unit request, so take the list head. */
    data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));

    JMEM_VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
    JERRY_CONTEXT (jmem_heap_allocated_size) += JMEM_ALIGNMENT;

    /* Raise the GC trigger limit when usage has caught up with it. */
    if (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
    }

    if (data_space_p->size == JMEM_ALIGNMENT)
    {
      /* The whole region is consumed: unlink it from the free list. */
      JERRY_HEAP_CONTEXT (first).next_offset = data_space_p->next_offset;
    }
    else
    {
      JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);

      /* Split the region: the remainder (one alignment unit further)
       * becomes the new head of the free list. */
      jmem_heap_free_t *remaining_p;
      remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset) + 1;

      JMEM_VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
      remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
      remaining_p->next_offset = data_space_p->next_offset;
      JMEM_VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

      JERRY_HEAP_CONTEXT (first).next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
    }

    JMEM_VALGRIND_NOACCESS_SPACE (data_space_p, sizeof (jmem_heap_free_t));

    /* The skip pointer must not reference the block that was just allocated. */
    if (JERRY_UNLIKELY (data_space_p == JERRY_CONTEXT (jmem_heap_list_skip_p)))
    {
      JERRY_CONTEXT (jmem_heap_list_skip_p) = JMEM_HEAP_GET_ADDR_FROM_OFFSET (JERRY_HEAP_CONTEXT (first).next_offset);
    }
  }
  /* Slow path for larger regions. */
  else
  {
    /* First-fit search over the singly linked free-region list. */
    uint32_t current_offset = JERRY_HEAP_CONTEXT (first).next_offset;
    jmem_heap_free_t *prev_p = &JERRY_HEAP_CONTEXT (first);

    while (JERRY_LIKELY (current_offset != JMEM_HEAP_END_OF_LIST))
    {
      jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (current_offset);
      JERRY_ASSERT (jmem_is_heap_pointer (current_p));
      JMEM_VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));

      const uint32_t next_offset = current_p->next_offset;
      JERRY_ASSERT (next_offset == JMEM_HEAP_END_OF_LIST
                    || jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset)));

      if (current_p->size >= required_size)
      {
        /* Region is sufficiently big, store address. */
        data_space_p = current_p;

        /* Region was larger than necessary. */
        if (current_p->size > required_size)
        {
          /* Get address of remaining space. */
          jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);

          /* Update metadata. */
          JMEM_VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
          remaining_p->size = current_p->size - (uint32_t) required_size;
          remaining_p->next_offset = next_offset;
          JMEM_VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));

          /* Update list. */
          JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
          JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }
        /* Block is an exact fit. */
        else
        {
          /* Remove the region from the list. */
          JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
          prev_p->next_offset = next_offset;
          JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        }

        JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

        /* Found enough space. */
        JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

        /* A large request can step over the limit several times, hence the loop. */
        while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
        {
          JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
        }

        break;
      }

      JMEM_VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));
      /* Next in list. */
      prev_p = current_p;
      current_offset = next_offset;
    }
  }

  JMEM_VALGRIND_NOACCESS_SPACE (&JERRY_HEAP_CONTEXT (first), sizeof (jmem_heap_free_t));

  /* data_space_p is still NULL here if no sufficiently large region was found. */
  JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
  JMEM_VALGRIND_MALLOCLIKE_SPACE (data_space_p, size);

  return (void *) data_space_p;
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_CONTEXT (jmem_heap_allocated_size) += size;

  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
  }

  return malloc (size);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_alloc */
257
/**
 * Allocation of memory block, reclaiming memory if the request cannot be fulfilled.
 *
 * Note:
 *      Each failed allocation attempt tries to reclaim memory with an increasing pressure,
 *      up to 'max_pressure', or until a sufficient memory block is found. When JMEM_PRESSURE_FULL
 *      is reached, the engine is terminated with ERR_OUT_OF_MEMORY. The `max_pressure` argument
 *      can be used to limit the maximum pressure, and prevent the engine from terminating.
 *
 * @return NULL, if the required memory size is 0 or not enough memory
 *         pointer to the allocated memory block, if allocation is successful
 */
static void *
jmem_heap_gc_and_alloc_block (const size_t size, /**< required memory size */
                              jmem_pressure_t max_pressure) /**< pressure limit */
{
  if (JERRY_UNLIKELY (size == 0))
  {
    return NULL;
  }

  jmem_pressure_t pressure = JMEM_PRESSURE_NONE;

#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  /* Proactively collect garbage when this request would cross the GC limit. */
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + size >= JERRY_CONTEXT (jmem_heap_limit))
  {
    pressure = JMEM_PRESSURE_LOW;
    ecma_free_unused_memory (pressure);
  }
#else /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  ecma_gc_run ();
#endif /* ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */

  void *data_space_p = jmem_heap_alloc (size);

  /* Retry with increasing GC pressure until the allocation succeeds
   * or the pressure limit is reached. */
  /* cppcheck-suppress memleak */
  while (JERRY_UNLIKELY (data_space_p == NULL) && JERRY_LIKELY (pressure < max_pressure))
  {
    pressure++;
    ecma_free_unused_memory (pressure);
    data_space_p = jmem_heap_alloc (size);
  }

  return data_space_p;
} /* jmem_heap_gc_and_alloc_block */
303
/**
 * Internal method for allocating a memory block.
 *
 * Note:
 *      Allocates with JMEM_PRESSURE_FULL, so a failed allocation terminates the
 *      engine with ERR_OUT_OF_MEMORY (see jmem_heap_gc_and_alloc_block).
 *      Unlike jmem_heap_alloc_block, this does not update the heap statistics.
 *
 * @return NULL, if the required memory size is 0
 *         pointer to the allocated memory block, otherwise
 */
extern inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block_internal (const size_t size) /**< required memory size */
{
  return jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_FULL);
} /* jmem_heap_alloc_block_internal */
312
/**
 * Allocation of memory block, reclaiming unused memory if there is not enough.
 *
 * Note:
 *      If a sufficiently sized block can't be found, the engine will be terminated with ERR_OUT_OF_MEMORY.
 *
 * @return NULL, if the required memory is 0
 *         pointer to allocated memory block, otherwise
 */
extern inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block (const size_t size) /**< required memory size */
{
  void *block_p = jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_FULL);
  /* Account the allocation in the (optional) heap statistics. */
  JMEM_HEAP_STAT_ALLOC (size);
  return block_p;
} /* jmem_heap_alloc_block */
329
/**
 * Allocation of memory block, reclaiming unused memory if there is not enough.
 *
 * Note:
 *      If a sufficiently sized block can't be found, NULL will be returned
 *      (the pressure is capped at JMEM_PRESSURE_HIGH, so the engine is not terminated).
 *
 * @return NULL, if the required memory size is 0
 *         also NULL, if the allocation has failed
 *         pointer to the allocated memory block, otherwise
 */
extern inline void * JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
jmem_heap_alloc_block_null_on_error (const size_t size) /**< required memory size */
{
  void *block_p = jmem_heap_gc_and_alloc_block (size, JMEM_PRESSURE_HIGH);

#if ENABLED (JERRY_MEM_STATS)
  /* Only successful allocations are accounted in the statistics. */
  if (block_p != NULL)
  {
    JMEM_HEAP_STAT_ALLOC (size);
  }
#endif /* ENABLED (JERRY_MEM_STATS) */

  return block_p;
} /* jmem_heap_alloc_block_null_on_error */
354
355#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
/**
 * Finds the block in the free block list which precedes the argument block
 *
 * Note:
 *      When the block is located after the cached skip pointer, the search
 *      starts from there instead of the list head, shortening the walk.
 *
 * @return pointer to the preceding block
 */
static jmem_heap_free_t *
jmem_heap_find_prev (const jmem_heap_free_t * const block_p) /**< which memory block's predecessor we're looking for */
{
  const jmem_heap_free_t *prev_p;

  if (block_p > JERRY_CONTEXT (jmem_heap_list_skip_p))
  {
    /* The skip pointer lies before the block: start the walk from it. */
    prev_p = JERRY_CONTEXT (jmem_heap_list_skip_p);
  }
  else
  {
    /* Otherwise fall back to the list head sentinel. */
    prev_p = &JERRY_HEAP_CONTEXT (first);
  }

  JERRY_ASSERT (jmem_is_heap_pointer (block_p));
  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  /* Find position of region in the list. */
  while (prev_p->next_offset < block_offset)
  {
    const jmem_heap_free_t * const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
    JERRY_ASSERT (jmem_is_heap_pointer (next_p));

    JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
    JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    prev_p = next_p;
  }

  JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  return (jmem_heap_free_t *) prev_p;
} /* jmem_heap_find_prev */
393
/**
 * Inserts the block into the free chain after a specified block.
 *
 * Note:
 *      'jmem_heap_find_prev' can and should be used to find the previous free block
 *
 * Note:
 *      The inserted block is coalesced with its previous and/or next neighbour
 *      when they are adjacent in memory.
 */
static void
jmem_heap_insert_block (jmem_heap_free_t *block_p, /**< block to insert */
                        jmem_heap_free_t *prev_p, /**< the free block after which to insert 'block_p' */
                        const size_t size) /**< size of the inserted block */
{
  JERRY_ASSERT ((uintptr_t) block_p % JMEM_ALIGNMENT == 0);
  JERRY_ASSERT (size % JMEM_ALIGNMENT == 0);

  JMEM_VALGRIND_NOACCESS_SPACE (block_p, size);

  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  jmem_heap_free_t *next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
  JMEM_VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

  const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);

  /* Update prev. */
  if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    /* Can be merged. */
    prev_p->size += (uint32_t) size;
    JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
    /* Continue the next-merge check with the enlarged previous region. */
    block_p = prev_p;
  }
  else
  {
    block_p->size = (uint32_t) size;
    prev_p->next_offset = block_offset;
  }

  /* Update next. */
  if (jmem_heap_get_region_end (block_p) == next_p)
  {
    /* Can be merged. */
    block_p->size += next_p->size;
    block_p->next_offset = next_p->next_offset;
  }
  else
  {
    block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
  }

  /* Cache the predecessor to speed up later free-list lookups. */
  JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;

  JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
  JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
} /* jmem_heap_insert_block */
449#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
450
/**
 * Internal method for freeing a memory block.
 *
 * Note:
 *      Unlike jmem_heap_free_block, this does not update the heap statistics.
 */
void JERRY_ATTR_HOT
jmem_heap_free_block_internal (void *ptr, /**< pointer to beginning of data space of the block */
                               const size_t size) /**< size of allocated region */
{
  JERRY_ASSERT (size > 0);
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_allocated_size) > 0);

#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* checking that ptr points to the heap */
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0);

  /* Round up to the allocator's alignment granularity, mirroring jmem_heap_alloc. */
  const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  /* Link the block back into the free list (coalescing with neighbours if possible). */
  jmem_heap_free_t *const block_p = (jmem_heap_free_t *) ptr;
  jmem_heap_free_t *const prev_p = jmem_heap_find_prev (block_p);
  jmem_heap_insert_block (block_p, prev_p, aligned_size);

  JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_size;

  JMEM_VALGRIND_FREELIKE_SPACE (ptr);
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_CONTEXT (jmem_heap_allocated_size) -= size;
  free (ptr);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  /* Lower the GC trigger limit as usage drops. */
  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
  }

  JERRY_ASSERT (JERRY_CONTEXT (jmem_heap_limit) >= JERRY_CONTEXT (jmem_heap_allocated_size));
} /* jmem_heap_free_block_internal */
487
/**
 * Reallocates the memory region pointed to by 'ptr', changing the size of the allocated region.
 *
 * Note:
 *      Tries in order: same aligned size (no-op), shrink in place, grow in place
 *      (extending into an adjacent free region at the end or the front), and
 *      finally allocate-copy-free. Growing may trigger garbage collection.
 *
 * @return pointer to the reallocated region
 */
void * JERRY_ATTR_HOT
jmem_heap_realloc_block (void *ptr, /**< memory region to reallocate */
                         const size_t old_size, /**< current size of the region */
                         const size_t new_size) /**< desired new size */
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  JERRY_ASSERT (jmem_is_heap_pointer (ptr));
  JERRY_ASSERT ((uintptr_t) ptr % JMEM_ALIGNMENT == 0);
  JERRY_ASSERT (old_size != 0);
  JERRY_ASSERT (new_size != 0);

  jmem_heap_free_t * const block_p = (jmem_heap_free_t *) ptr;
  const size_t aligned_new_size = (new_size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
  const size_t aligned_old_size = (old_size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;

  /* Same aligned size: only the statistics/valgrind bookkeeping changes. */
  if (aligned_old_size == aligned_new_size)
  {
    JMEM_VALGRIND_RESIZE_SPACE (block_p, old_size, new_size);
    JMEM_HEAP_STAT_FREE (old_size);
    JMEM_HEAP_STAT_ALLOC (new_size);
    return block_p;
  }

  /* Shrink in place: return the tail of the block to the free list. */
  if (aligned_new_size < aligned_old_size)
  {
    JMEM_VALGRIND_RESIZE_SPACE (block_p, old_size, new_size);
    JMEM_HEAP_STAT_FREE (old_size);
    JMEM_HEAP_STAT_ALLOC (new_size);
    jmem_heap_insert_block ((jmem_heap_free_t *) ((uint8_t *) block_p + aligned_new_size),
                            jmem_heap_find_prev (block_p),
                            aligned_old_size - aligned_new_size);

    JERRY_CONTEXT (jmem_heap_allocated_size) -= (aligned_old_size - aligned_new_size);
    /* Lower the GC trigger limit as usage drops. */
    while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
    }

    return block_p;
  }

  /* Growing: 'required_size' is the extra space needed beyond the current block. */
  void *ret_block_p = NULL;
  const size_t required_size = aligned_new_size - aligned_old_size;

#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + required_size >= JERRY_CONTEXT (jmem_heap_limit))
  {
    ecma_free_unused_memory (JMEM_PRESSURE_LOW);
  }
#else /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  ecma_gc_run ();
#endif /* ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */

  jmem_heap_free_t *prev_p = jmem_heap_find_prev (block_p);
  JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
  jmem_heap_free_t * const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);

  /* Check if block can be extended at the end */
  if (((jmem_heap_free_t *) ((uint8_t *) block_p + aligned_old_size)) == next_p)
  {
    JMEM_VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));

    if (required_size <= next_p->size)
    {
      /* Block can be extended, update the list. */
      if (required_size == next_p->size)
      {
        /* The adjacent free region is consumed entirely: unlink it. */
        prev_p->next_offset = next_p->next_offset;
      }
      else
      {
        /* Split the adjacent free region and link in the remainder. */
        jmem_heap_free_t *const new_next_p = (jmem_heap_free_t *) ((uint8_t *) next_p + required_size);
        JMEM_VALGRIND_DEFINED_SPACE (new_next_p, sizeof (jmem_heap_free_t));
        new_next_p->next_offset = next_p->next_offset;
        new_next_p->size = (uint32_t) (next_p->size - required_size);
        JMEM_VALGRIND_NOACCESS_SPACE (new_next_p, sizeof (jmem_heap_free_t));
        prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (new_next_p);
      }

      /* next_p will be marked as undefined space. */
      JMEM_VALGRIND_RESIZE_SPACE (block_p, old_size, new_size);
      ret_block_p = block_p;
    }
    else
    {
      JMEM_VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
    }

    JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
  }
  /*
   * Check if block can be extended at the front.
   * This is less optimal because we need to copy the data, but still better than allocating a new block.
   */
  else if (jmem_heap_get_region_end (prev_p) == block_p)
  {
    if (required_size <= prev_p->size)
    {
      if (required_size == prev_p->size)
      {
        /* The preceding free region is consumed entirely: unlink it
         * by relinking its own predecessor to next_p. */
        JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
        prev_p = jmem_heap_find_prev (prev_p);
        JMEM_VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
        prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
      }
      else
      {
        /* Shrink the preceding free region from its end. */
        prev_p->size = (uint32_t) (prev_p->size - required_size);
      }

      JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));

      ret_block_p = (uint8_t *) block_p - required_size;

      /* Mark the new block as undefined so that we are able to write to it. */
      JMEM_VALGRIND_UNDEFINED_SPACE (ret_block_p, old_size);
      /* The blocks are likely to overlap, so mark the old block as defined memory again. */
      JMEM_VALGRIND_DEFINED_SPACE (block_p, old_size);
      memmove (ret_block_p, block_p, old_size);

      JMEM_VALGRIND_FREELIKE_SPACE (block_p);
      JMEM_VALGRIND_MALLOCLIKE_SPACE (ret_block_p, new_size);
      JMEM_VALGRIND_DEFINED_SPACE (ret_block_p, old_size);
    }
    else
    {
      JMEM_VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
    }
  }

  if (ret_block_p != NULL)
  {
    /* Managed to extend the block. Update memory usage and the skip pointer. */
    JERRY_CONTEXT (jmem_heap_list_skip_p) = prev_p;
    JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

    while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
    {
      JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
    }
  }
  else
  {
    /* Could not extend block. Allocate new region and copy the data. */
    /* jmem_heap_alloc_block_internal will adjust the allocated_size, but insert_block will not,
       so we reduce it here first, so that the limit calculation remains consistent. */
    JERRY_CONTEXT (jmem_heap_allocated_size) -= aligned_old_size;
    ret_block_p = jmem_heap_alloc_block_internal (new_size);

    /* jmem_heap_alloc_block_internal may trigger garbage collection, which can create new free blocks
     * in the heap structure, so we need to look up the previous block again. */
    prev_p = jmem_heap_find_prev (block_p);

    memcpy (ret_block_p, block_p, old_size);
    jmem_heap_insert_block (block_p, prev_p, aligned_old_size);
    /* jmem_heap_alloc_block_internal will call JMEM_VALGRIND_MALLOCLIKE_SPACE */
    JMEM_VALGRIND_FREELIKE_SPACE (block_p);
  }

  JMEM_HEAP_STAT_FREE (old_size);
  JMEM_HEAP_STAT_ALLOC (new_size);
  return ret_block_p;
#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  const size_t required_size = new_size - old_size;

#if !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC)
  if (JERRY_CONTEXT (jmem_heap_allocated_size) + required_size >= JERRY_CONTEXT (jmem_heap_limit))
  {
    ecma_free_unused_memory (JMEM_PRESSURE_LOW);
  }
#else /* !ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */
  ecma_gc_run ();
#endif /* ENABLED (JERRY_MEM_GC_BEFORE_EACH_ALLOC) */

  JERRY_CONTEXT (jmem_heap_allocated_size) += required_size;

  /* Adjust the GC trigger limit in both directions to track the new usage. */
  while (JERRY_CONTEXT (jmem_heap_allocated_size) >= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) += CONFIG_GC_LIMIT;
  }

  while (JERRY_CONTEXT (jmem_heap_allocated_size) + CONFIG_GC_LIMIT <= JERRY_CONTEXT (jmem_heap_limit))
  {
    JERRY_CONTEXT (jmem_heap_limit) -= CONFIG_GC_LIMIT;
  }

  JMEM_HEAP_STAT_FREE (old_size);
  JMEM_HEAP_STAT_ALLOC (new_size);
  return realloc (ptr, new_size);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_realloc_block */
684
685/**
686 * Free memory block
687 */
688extern inline void JERRY_ATTR_HOT JERRY_ATTR_ALWAYS_INLINE
689jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
690 const size_t size) /**< size of allocated region */
691{
692 jmem_heap_free_block_internal (ptr, size);
693 JMEM_HEAP_STAT_FREE (size);
694 return;
695} /* jmem_heap_free_block */
696
697#ifndef JERRY_NDEBUG
698/**
699 * Check whether the pointer points to the heap
700 *
701 * Note:
702 * the routine should be used only for assertion checks
703 *
704 * @return true - if pointer points to the heap,
705 * false - otherwise
706 */
707bool
708jmem_is_heap_pointer (const void *pointer) /**< pointer */
709{
710#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
711 return ((uint8_t *) pointer >= JERRY_HEAP_CONTEXT (area)
712 && (uint8_t *) pointer <= (JERRY_HEAP_CONTEXT (area) + JMEM_HEAP_AREA_SIZE));
713#else /* ENABLED (JERRY_SYSTEM_ALLOCATOR) */
714 JERRY_UNUSED (pointer);
715 return true;
716#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
717} /* jmem_is_heap_pointer */
718#endif /* !JERRY_NDEBUG */
719
720#if ENABLED (JERRY_MEM_STATS)
721/**
722 * Get heap memory usage statistics
723 */
724void
725jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats */
726{
727 JERRY_ASSERT (out_heap_stats_p != NULL);
728
729 *out_heap_stats_p = JERRY_CONTEXT (jmem_heap_stats);
730} /* jmem_heap_get_stats */
731
/**
 * Print heap memory usage statistics
 *
 * Note:
 *      Uses the engine's debug channel (JERRY_DEBUG_MSG); sizes are in bytes.
 */
void
jmem_heap_stats_print (void)
{
  jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);

  JERRY_DEBUG_MSG ("Heap stats:\n");
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* The fixed heap size is only meaningful with the private heap area. */
  JERRY_DEBUG_MSG ("  Heap size = %zu bytes\n",
                   heap_stats->size);
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
  JERRY_DEBUG_MSG ("  Allocated = %zu bytes\n"
                   "  Peak allocated = %zu bytes\n"
                   "  Waste = %zu bytes\n"
                   "  Peak waste = %zu bytes\n"
                   "  Allocated byte code data = %zu bytes\n"
                   "  Peak allocated byte code data = %zu bytes\n"
                   "  Allocated string data = %zu bytes\n"
                   "  Peak allocated string data = %zu bytes\n"
                   "  Allocated object data = %zu bytes\n"
                   "  Peak allocated object data = %zu bytes\n"
                   "  Allocated property data = %zu bytes\n"
                   "  Peak allocated property data = %zu bytes\n",
                   heap_stats->allocated_bytes,
                   heap_stats->peak_allocated_bytes,
                   heap_stats->waste_bytes,
                   heap_stats->peak_waste_bytes,
                   heap_stats->byte_code_bytes,
                   heap_stats->peak_byte_code_bytes,
                   heap_stats->string_bytes,
                   heap_stats->peak_string_bytes,
                   heap_stats->object_bytes,
                   heap_stats->peak_object_bytes,
                   heap_stats->property_bytes,
                   heap_stats->peak_property_bytes);
} /* jmem_heap_stats_print */
770
/**
 * Initialize heap memory usage statistics account structure
 */
void
jmem_heap_stat_init (void)
{
#if !ENABLED (JERRY_SYSTEM_ALLOCATOR)
  /* Only the fixed heap size is known up front; counters start at zero. */
  JERRY_CONTEXT (jmem_heap_stats).size = JMEM_HEAP_AREA_SIZE;
#endif /* !ENABLED (JERRY_SYSTEM_ALLOCATOR) */
} /* jmem_heap_stat_init */
781
782/**
783 * Account allocation
784 */
785void
786jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */
787{
788 const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
789 const size_t waste_bytes = aligned_size - size;
790
791 jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
792
793 heap_stats->allocated_bytes += aligned_size;
794 heap_stats->waste_bytes += waste_bytes;
795
796 if (heap_stats->allocated_bytes > heap_stats->peak_allocated_bytes)
797 {
798 heap_stats->peak_allocated_bytes = heap_stats->allocated_bytes;
799 }
800
801 if (heap_stats->waste_bytes > heap_stats->peak_waste_bytes)
802 {
803 heap_stats->peak_waste_bytes = heap_stats->waste_bytes;
804 }
805} /* jmem_heap_stat_alloc */
806
807/**
808 * Account freeing
809 */
810void
811jmem_heap_stat_free (size_t size) /**< Size of freed block */
812{
813 const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
814 const size_t waste_bytes = aligned_size - size;
815
816 jmem_heap_stats_t *heap_stats = &JERRY_CONTEXT (jmem_heap_stats);
817
818 heap_stats->allocated_bytes -= aligned_size;
819 heap_stats->waste_bytes -= waste_bytes;
820} /* jmem_heap_stat_free */
821
822#endif /* ENABLED (JERRY_MEM_STATS) */
823
824/**
825 * @}
826 * @}
827 */
828