/*
 * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018 SAP SE. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP
#define SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP

#include "memory/allocation.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

namespace metaspace {

class Metachunk;

// Helper for the occupancy bitmap: a type trait yielding a constant of an
// unsigned integer type with all bits set.
template <typename T> struct all_ones { static const T value; };
template <> struct all_ones <uint64_t> { static const uint64_t value = 0xFFFFFFFFFFFFFFFFULL; };
template <> struct all_ones <uint32_t> { static const uint32_t value = 0xFFFFFFFF; };
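
// For example, all_ones<uint32_t>::value is 0xFFFFFFFF. set_bits_of_region_T
// below uses this to fill one whole map word with a single store instead of
// setting its 32 (or 64) bits individually.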

// The OccupancyMap is a bitmap which, for a given VirtualSpaceNode,
// keeps information about
// - where a chunk starts
// - whether a chunk is in-use or free.
// Each bit in this bitmap covers one region of memory the size of the
// smallest chunk (SpecializedChunk resp. ClassSpecializedChunk).
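//
// A minimal usage sketch (node_bottom, node_word_size, chunk_base and
// chunk_word_size are illustrative names, not part of this API):
//
//   OccupancyMap* map = new OccupancyMap(node_bottom, node_word_size,
//                                        SpecializedChunk);
//   map->set_chunk_starts_at_address(chunk_base, true); // mark chunk head
//   map->set_region_in_use(chunk_base, chunk_word_size, true);
//   ...
//   if (!map->is_region_in_use(chunk_base, chunk_word_size)) {
//     // Region is free; the chunks inside are candidates for merging.
//   }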
class OccupancyMap : public CHeapObj<mtInternal> {

  // The address range this map covers.
  const MetaWord* const _reference_address;
  const size_t _word_size;

  // The word size of a specialized chunk, aka the number of words one
  // bit in this map represents.
  const size_t _smallest_chunk_word_size;

  // Map data.
  // The data are organized in two bit layers:
  // The first layer is the chunk-start-map. Here, a bit is set to mark
  // the corresponding region as the head of a chunk.
  // The second layer is the in-use-map. Here, a set bit indicates that
  // the corresponding region belongs to a chunk which is in use.
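  // For illustration: a node beginning with a free one-unit chunk A,
  // followed by an in-use chunk B spanning two units, would be encoded as
  //   chunk-start-map: 1 1 0 ...
  //   in-use-map:      0 1 1 ...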
  uint8_t* _map[2];

  enum { layer_chunk_start_map = 0, layer_in_use_map = 1 };

  // Length, in bytes, of the bitmap data of one layer.
  size_t _map_size;

  // Returns true if the bit at position pos in bit-layer layer is set.
  bool get_bit_at_position(unsigned pos, unsigned layer) const {
    assert(layer == 0 || layer == 1, "Invalid layer %u.", layer);
    const unsigned byteoffset = pos / 8;
    assert(byteoffset < _map_size,
           "Invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
    const unsigned mask = 1 << (pos % 8);
    return (_map[layer][byteoffset] & mask) != 0;
  }

  // Changes the bit at position pos in bit-layer layer to value v.
  void set_bit_at_position(unsigned pos, unsigned layer, bool v) {
    assert(layer == 0 || layer == 1, "Invalid layer %u.", layer);
    const unsigned byteoffset = pos / 8;
    assert(byteoffset < _map_size,
           "Invalid byte offset (%u), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
    const unsigned mask = 1 << (pos % 8);
    if (v) {
      _map[layer][byteoffset] |= mask;
    } else {
      _map[layer][byteoffset] &= ~mask;
    }
  }

  // Optimized case of is_any_bit_set_in_region for 32/64bit aligned access:
  // pos is 32/64 aligned and num_bits is 32/64.
  // This is the typical case when coalescing to medium chunks, whose size is
  // 32 or 64 times the specialized chunk size (depending on class or non-class
  // case), so they occupy 32 or 64 bits which are 32/64bit aligned, because
  // chunks are chunk-size aligned.
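  // For instance, a medium chunk covering 64 map bits at a 64-aligned pos:
  // checking its whole region reduces to loading one uint64_t from the map
  // and testing it for non-zero.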
  template <typename T>
  bool is_any_bit_set_in_region_3264(unsigned pos, unsigned num_bits, unsigned layer) const {
    assert(_map_size > 0, "not initialized");
    assert(layer == 0 || layer == 1, "Invalid layer %u.", layer);
    assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned (%u).", pos);
    assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u).", num_bits);
    const size_t byteoffset = pos / 8;
    assert(byteoffset <= (_map_size - sizeof(T)),
           "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
    const T w = *(T*)(_map[layer] + byteoffset);
    return w != 0;
  }

  // Returns true if any bit in region [pos, pos + num_bits) is set in bit-layer layer.
  bool is_any_bit_set_in_region(unsigned pos, unsigned num_bits, unsigned layer) const {
    if (pos % 32 == 0 && num_bits == 32) {
      return is_any_bit_set_in_region_3264<uint32_t>(pos, num_bits, layer);
    } else if (pos % 64 == 0 && num_bits == 64) {
      return is_any_bit_set_in_region_3264<uint64_t>(pos, num_bits, layer);
    } else {
      for (unsigned n = 0; n < num_bits; n++) {
        if (get_bit_at_position(pos + n, layer)) {
          return true;
        }
      }
    }
    return false;
  }

  // Returns true if any bit in region [p, p+word_size) is set in bit-layer layer.
  bool is_any_bit_set_in_region(MetaWord* p, size_t word_size, unsigned layer) const {
    assert(word_size % _smallest_chunk_word_size == 0,
           "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
    const unsigned pos = get_bitpos_for_address(p);
    const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
    return is_any_bit_set_in_region(pos, num_bits, layer);
  }

  // Optimized case of set_bits_of_region for 32/64bit aligned access:
  // pos is 32/64 aligned and num_bits is 32/64.
  // This is the typical case when coalescing to medium chunks, whose size
  // is 32 or 64 times the specialized chunk size (depending on class or
  // non-class case), so they occupy 32 or 64 bits which are 32/64bit
  // aligned, because chunks are chunk-size aligned.
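  // Analogously to the read case above, marking such a region then reduces
  // to a single store of all_ones<T>::value (or 0) into the map word.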
  template <typename T>
  void set_bits_of_region_T(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
    assert(pos % (sizeof(T) * 8) == 0, "Bit position must be aligned to %u (%u).",
           (unsigned)(sizeof(T) * 8), pos);
    assert(num_bits == (sizeof(T) * 8), "Number of bits incorrect (%u), expected %u.",
           num_bits, (unsigned)(sizeof(T) * 8));
    const size_t byteoffset = pos / 8;
    assert(byteoffset <= (_map_size - sizeof(T)),
           "Invalid byte offset (" SIZE_FORMAT "), map size is " SIZE_FORMAT ".", byteoffset, _map_size);
    T* const pw = (T*)(_map[layer] + byteoffset);
    *pw = v ? all_ones<T>::value : (T) 0;
  }

  // Sets all num_bits bits in the region starting at pos to value v.
  void set_bits_of_region(unsigned pos, unsigned num_bits, unsigned layer, bool v) {
    assert(_map_size > 0, "not initialized");
    assert(layer == 0 || layer == 1, "Invalid layer %u.", layer);
    if (pos % 32 == 0 && num_bits == 32) {
      set_bits_of_region_T<uint32_t>(pos, num_bits, layer, v);
    } else if (pos % 64 == 0 && num_bits == 64) {
      set_bits_of_region_T<uint64_t>(pos, num_bits, layer, v);
    } else {
      for (unsigned n = 0; n < num_bits; n++) {
        set_bit_at_position(pos + n, layer, v);
      }
    }
  }

  // Helper: sets all bits in the region [p, p+word_size) to value v.
  void set_bits_of_region(MetaWord* p, size_t word_size, unsigned layer, bool v) {
    assert(word_size % _smallest_chunk_word_size == 0,
           "Region size " SIZE_FORMAT " not a multiple of smallest chunk size.", word_size);
    const unsigned pos = get_bitpos_for_address(p);
    const unsigned num_bits = (unsigned) (word_size / _smallest_chunk_word_size);
    set_bits_of_region(pos, num_bits, layer, v);
  }

  // Helper: given an address, return the bit position representing that address.
  unsigned get_bitpos_for_address(const MetaWord* p) const {
    assert(_reference_address != NULL, "not initialized");
    assert(p >= _reference_address && p < _reference_address + _word_size,
           "Address %p out of range for occupancy map [%p..%p).",
           p, _reference_address, _reference_address + _word_size);
    assert(is_aligned(p, _smallest_chunk_word_size * sizeof(MetaWord)),
           "Address not aligned (%p).", p);
    const ptrdiff_t d = (p - _reference_address) / _smallest_chunk_word_size;
    assert(d >= 0 && (size_t)d < _map_size * 8, "Sanity.");
    return (unsigned) d;
  }
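  // For illustration (numbers assumed, not normative): with
  // _smallest_chunk_word_size == 128 and p == _reference_address + 384,
  // the returned bit position is 384 / 128 == 3.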

 public:

  OccupancyMap(const MetaWord* reference_address, size_t word_size, size_t smallest_chunk_word_size);
  ~OccupancyMap();

  // Returns true if a chunk starts at address p.
  bool chunk_starts_at_address(MetaWord* p) const {
    const unsigned pos = get_bitpos_for_address(p);
    return get_bit_at_position(pos, layer_chunk_start_map);
  }

  // Sets or clears, depending on v, the chunk-start bit at address p.
  void set_chunk_starts_at_address(MetaWord* p, bool v) {
    const unsigned pos = get_bitpos_for_address(p);
    set_bit_at_position(pos, layer_chunk_start_map, v);
  }

  // Removes all chunk-start bits inside a region, typically as a
  // result of a chunk merge.
  void wipe_chunk_start_bits_in_region(MetaWord* p, size_t word_size) {
    set_bits_of_region(p, word_size, layer_chunk_start_map, false);
  }
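  // A sketch of the intended call sequence when merging (illustrative only;
  // merged_word_size is an assumed name):
  //
  //   if (!is_region_in_use(p, merged_word_size)) {
  //     wipe_chunk_start_bits_in_region(p, merged_word_size);
  //     set_chunk_starts_at_address(p, true); // one head for the merged chunk
  //   }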

  // Returns true if there are live (in use) chunks in the region
  // [p, p+word_size).
  bool is_region_in_use(MetaWord* p, size_t word_size) const {
    return is_any_bit_set_in_region(p, word_size, layer_in_use_map);
  }

  // Marks the region starting at p with the size word_size as in use
  // or free, depending on v.
  void set_region_in_use(MetaWord* p, size_t word_size, bool v) {
    set_bits_of_region(p, word_size, layer_in_use_map, v);
  }

  // Verify occupancy map for the address range [from, to).
  // We need to tell it the address range, because the memory the
  // occupancy map is covering may not be fully committed yet.
  DEBUG_ONLY(void verify(MetaWord* from, MetaWord* to);)

  // Verify that a given chunk is correctly accounted for in the bitmap.
  DEBUG_ONLY(void verify_for_chunk(Metachunk* chunk);)

};

} // namespace metaspace

#endif // SHARE_MEMORY_METASPACE_OCCUPANCYMAP_HPP