/*
 * Copyright (c) 2013, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_PADDED_INLINE_HPP
#define SHARE_MEMORY_PADDED_INLINE_HPP

#include "memory/allocation.inline.hpp"
#include "memory/padded.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// Creates an aligned padded array.
// The memory can't be deleted since the raw memory chunk is not returned.
template <class T, MEMFLAGS flags, size_t alignment>
PaddedEnd<T>* PaddedArray<T, flags, alignment>::create_unfreeable(uint length) {
  // Check that the PaddedEnd class works as intended:
  // its size must be a multiple of the requested alignment.
  STATIC_ASSERT(is_aligned_(sizeof(PaddedEnd<T>), alignment));

  // Allocate a chunk of memory large enough to allow for some alignment.
  void* chunk = AllocateHeap(length * sizeof(PaddedEnd<T, alignment>) + alignment, flags);

  // Make the initial alignment.
  PaddedEnd<T>* aligned_padded_array = (PaddedEnd<T>*)align_up(chunk, alignment);

  // Call the default constructor for each element.
  for (uint i = 0; i < length; i++) {
    ::new (&aligned_padded_array[i]) T();
  }

  return aligned_padded_array;
}

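// Illustrative usage of PaddedArray::create_unfreeable (a sketch, not part of
// the original file): each element of a hypothetical per-worker counter type
// ends up on its own cache line, assuming the default alignment of
// DEFAULT_CACHE_LINE_SIZE declared in padded.hpp. WorkerCounter, num_workers
// and worker_id are made-up names; mtGC is an existing MEMFLAGS value.
//
//   struct WorkerCounter {
//     volatile size_t _value;
//     WorkerCounter() : _value(0) {}
//   };
//
//   PaddedEnd<WorkerCounter>* counters =
//       PaddedArray<WorkerCounter, mtGC>::create_unfreeable(num_workers);
//   counters[worker_id]._value++;   // no false sharing between workers
//   // Note: the array is deliberately leaked; the raw chunk is never returned.
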
template <class T, MEMFLAGS flags, size_t alignment>
T** Padded2DArray<T, flags, alignment>::create_unfreeable(uint rows, uint columns, size_t* allocation_size) {
  // Calculate and align the size of the first dimension's table.
  size_t table_size = align_up(rows * sizeof(T*), alignment);
  // The size of the separate rows.
  size_t row_size = align_up(columns * sizeof(T), alignment);
  // Total size consists of the indirection table plus the rows.
  size_t total_size = table_size + rows * row_size + alignment;

  // Allocate a chunk of memory large enough to allow alignment of the chunk.
  // The mmap-backed allocation is returned zero-filled, so the memory does not
  // need to be cleared explicitly.
  void* chunk = MmapArrayAllocator<uint8_t>::allocate(total_size, flags);
  // Align the chunk of memory.
  T** result = (T**)align_up(chunk, alignment);
  void* data_start = (void*)((uintptr_t)result + table_size);

  // Fill in the row table.
  for (size_t i = 0; i < rows; i++) {
    result[i] = (T*)((uintptr_t)data_start + i * row_size);
  }

  if (allocation_size != NULL) {
    *allocation_size = total_size;
  }

  return result;
}

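// Illustrative usage of Padded2DArray::create_unfreeable (a sketch, not part
// of the original file): a rows-by-columns table of counters in which every
// row starts on its own alignment boundary, so writers to different rows do
// not share cache lines. num_workers, num_regions, worker_id and region_index
// are made-up names; mtGC is an existing MEMFLAGS value.
//
//   size_t allocated_bytes = 0;
//   size_t** table =
//       Padded2DArray<size_t, mtGC>::create_unfreeable(num_workers, num_regions, &allocated_bytes);
//   table[worker_id][region_index] += 1;   // rows start out zero-filled
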
template <class T, MEMFLAGS flags, size_t alignment>
T* PaddedPrimitiveArray<T, flags, alignment>::create_unfreeable(size_t length) {
  // Allocate a chunk of memory large enough to allow for some alignment.
  void* chunk = AllocateHeap(length * sizeof(T) + alignment, flags);

  memset(chunk, 0, length * sizeof(T) + alignment);

  return (T*)align_up(chunk, alignment);
}

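// Illustrative usage of PaddedPrimitiveArray::create_unfreeable (a sketch, not
// part of the original file): a zero-filled array of primitives whose start
// address is aligned; unlike PaddedArray, the individual elements are not
// padded. element_count is a made-up name; mtInternal is an existing MEMFLAGS
// value, and DEFAULT_CACHE_LINE_SIZE is assumed to be the default alignment.
//
//   size_t* buffer =
//       PaddedPrimitiveArray<size_t, mtInternal>::create_unfreeable(element_count);
//   assert(is_aligned(buffer, DEFAULT_CACHE_LINE_SIZE), "start must be aligned");
//   // All elements read as zero until written.
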
#endif // SHARE_MEMORY_PADDED_INLINE_HPP