1 | /* |
2 | * Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | |
26 | #include "precompiled.hpp" |
27 | #include "logging/log.hpp" |
28 | #include "logging/logStream.hpp" |
29 | #include "memory/metaspace.hpp" |
30 | #include "memory/metaspace/chunkManager.hpp" |
31 | #include "memory/metaspace/metachunk.hpp" |
32 | #include "memory/metaspace/metaspaceCommon.hpp" |
33 | #include "memory/metaspace/virtualSpaceList.hpp" |
34 | #include "memory/metaspace/virtualSpaceNode.hpp" |
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
37 | #include "runtime/safepoint.hpp" |
38 | |
39 | namespace metaspace { |
40 | |
41 | |
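// Delete all nodes in the list. Note: deleting a VirtualSpaceNode is
// expected to release the memory it had reserved.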
42 | VirtualSpaceList::~VirtualSpaceList() { |
43 | VirtualSpaceListIterator iter(virtual_space_list()); |
44 | while (iter.repeat()) { |
45 | VirtualSpaceNode* vsl = iter.get_next(); |
46 | delete vsl; |
47 | } |
48 | } |
49 | |
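// Accounting helpers. The reserved/committed word counters mirror the sums
// over all nodes and may only be changed under the MetaspaceExpand_lock.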
50 | void VirtualSpaceList::inc_reserved_words(size_t v) { |
51 | assert_lock_strong(MetaspaceExpand_lock); |
52 | _reserved_words = _reserved_words + v; |
53 | } |
54 | void VirtualSpaceList::dec_reserved_words(size_t v) { |
55 | assert_lock_strong(MetaspaceExpand_lock); |
56 | _reserved_words = _reserved_words - v; |
57 | } |
58 | |
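// Total committed metaspace memory must never exceed MaxMetaspaceSize, so
// every update of the committed counter re-checks the limit.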
#define assert_committed_below_limit()                                 \
  assert(MetaspaceUtils::committed_bytes() <= MaxMetaspaceSize,        \
         "Too much committed memory. Committed: " SIZE_FORMAT          \
         " limit (MaxMetaspaceSize): " SIZE_FORMAT,                    \
         MetaspaceUtils::committed_bytes(), MaxMetaspaceSize)
64 | |
65 | void VirtualSpaceList::inc_committed_words(size_t v) { |
66 | assert_lock_strong(MetaspaceExpand_lock); |
67 | _committed_words = _committed_words + v; |
68 | |
69 | assert_committed_below_limit(); |
70 | } |
71 | void VirtualSpaceList::dec_committed_words(size_t v) { |
72 | assert_lock_strong(MetaspaceExpand_lock); |
73 | _committed_words = _committed_words - v; |
74 | |
75 | assert_committed_below_limit(); |
76 | } |
77 | |
78 | void VirtualSpaceList::inc_virtual_space_count() { |
79 | assert_lock_strong(MetaspaceExpand_lock); |
80 | _virtual_space_count++; |
81 | } |
82 | |
83 | void VirtualSpaceList::dec_virtual_space_count() { |
84 | assert_lock_strong(MetaspaceExpand_lock); |
85 | _virtual_space_count--; |
86 | } |
87 | |
// Walk the list of VirtualSpaceNodes and delete nodes whose container_count
// is zero. Before a node is deleted, its Metachunks are removed from their
// respective freelists.
91 | void VirtualSpaceList::purge(ChunkManager* chunk_manager) { |
92 | assert_lock_strong(MetaspaceExpand_lock); |
93 | // Don't use a VirtualSpaceListIterator because this |
94 | // list is being changed and a straightforward use of an iterator is not safe. |
95 | VirtualSpaceNode* prev_vsl = virtual_space_list(); |
96 | VirtualSpaceNode* next_vsl = prev_vsl; |
97 | int num_purged_nodes = 0; |
98 | while (next_vsl != NULL) { |
99 | VirtualSpaceNode* vsl = next_vsl; |
100 | DEBUG_ONLY(vsl->verify(false);) |
101 | next_vsl = vsl->next(); |
102 | // Don't free the current virtual space since it will likely |
103 | // be needed soon. |
104 | if (vsl->container_count() == 0 && vsl != current_virtual_space()) { |
      log_trace(gc, metaspace, freelist)("Purging VirtualSpaceNode " PTR_FORMAT " (capacity: " SIZE_FORMAT
                                         ", used: " SIZE_FORMAT ").", p2i(vsl), vsl->capacity_words_in_vs(), vsl->used_words_in_vs());
107 | DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_purged)); |
108 | // Unlink it from the list |
109 | if (prev_vsl == vsl) { |
110 | // This is the case of the current node being the first node. |
        assert(vsl == virtual_space_list(), "Expected to be the first node");
112 | set_virtual_space_list(vsl->next()); |
113 | } else { |
114 | prev_vsl->set_next(vsl->next()); |
115 | } |
116 | |
117 | vsl->purge(chunk_manager); |
118 | dec_reserved_words(vsl->reserved_words()); |
119 | dec_committed_words(vsl->committed_words()); |
120 | dec_virtual_space_count(); |
121 | delete vsl; |
      num_purged_nodes++;
123 | } else { |
124 | prev_vsl = vsl; |
125 | } |
126 | } |
127 | |
128 | // Verify list |
129 | #ifdef ASSERT |
130 | if (num_purged_nodes > 0) { |
131 | verify(false); |
132 | } |
133 | #endif |
134 | } |
135 | |
136 | |
// This function looks at the mmap regions in the metaspace without locking.
// The chunks are added with store ordering and are not deleted except at
// class unloading time, which happens at a safepoint.
140 | VirtualSpaceNode* VirtualSpaceList::find_enclosing_space(const void* ptr) { |
141 | // List should be stable enough to use an iterator here because removing virtual |
142 | // space nodes is only allowed at a safepoint. |
143 | if (is_within_envelope((address)ptr)) { |
144 | VirtualSpaceListIterator iter(virtual_space_list()); |
145 | while (iter.repeat()) { |
146 | VirtualSpaceNode* vsn = iter.get_next(); |
147 | if (vsn->contains(ptr)) { |
148 | return vsn; |
149 | } |
150 | } |
151 | } |
152 | return NULL; |
153 | } |
154 | |
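// Retire the current virtual space node: its remaining committed space is
// parceled out into chunks and handed back to the chunk manager's freelists.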
155 | void VirtualSpaceList::retire_current_virtual_space() { |
156 | assert_lock_strong(MetaspaceExpand_lock); |
157 | |
158 | VirtualSpaceNode* vsn = current_virtual_space(); |
159 | |
160 | ChunkManager* cm = is_class() ? Metaspace::chunk_manager_class() : |
161 | Metaspace::chunk_manager_metadata(); |
162 | |
163 | vsn->retire(cm); |
164 | } |
165 | |
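// Create a list for non-class metadata and eagerly reserve an initial node
// of word_size words.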
166 | VirtualSpaceList::VirtualSpaceList(size_t word_size) : |
167 | _virtual_space_list(NULL), |
168 | _current_virtual_space(NULL), |
169 | _is_class(false), |
170 | _reserved_words(0), |
171 | _committed_words(0), |
172 | _virtual_space_count(0), |
173 | _envelope_lo((address)max_uintx), |
174 | _envelope_hi(NULL) { |
175 | MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); |
176 | create_new_virtual_space(word_size); |
177 | } |
178 | |
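// Create the list for the compressed class space. It consists of a single
// node placed in the pre-reserved space rs.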
179 | VirtualSpaceList::VirtualSpaceList(ReservedSpace rs) : |
180 | _virtual_space_list(NULL), |
181 | _current_virtual_space(NULL), |
182 | _is_class(true), |
183 | _reserved_words(0), |
184 | _committed_words(0), |
185 | _virtual_space_count(0), |
186 | _envelope_lo((address)max_uintx), |
187 | _envelope_hi(NULL) { |
188 | MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag); |
189 | VirtualSpaceNode* class_entry = new VirtualSpaceNode(is_class(), rs); |
190 | bool succeeded = class_entry->initialize(); |
191 | if (succeeded) { |
192 | expand_envelope_to_include_node(class_entry); |
193 | // ensure lock-free iteration sees fully initialized node |
194 | OrderAccess::storestore(); |
195 | link_vs(class_entry); |
196 | } |
197 | } |
198 | |
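// Note: only reports the free space in the current node, not in the whole list.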
199 | size_t VirtualSpaceList::free_bytes() { |
200 | return current_virtual_space()->free_words_in_vs() * BytesPerWord; |
201 | } |
202 | |
203 | // Allocate another meta virtual space and add it to the list. |
204 | bool VirtualSpaceList::create_new_virtual_space(size_t vs_word_size) { |
205 | assert_lock_strong(MetaspaceExpand_lock); |
206 | |
207 | if (is_class()) { |
    assert(false, "We currently don't support more than one VirtualSpace for"
           " the compressed class space. The initialization of the"
           " CCS uses another code path and should not hit this path.");
211 | return false; |
212 | } |
213 | |
214 | if (vs_word_size == 0) { |
    assert(false, "vs_word_size should always be at least _reserve_alignment large.");
216 | return false; |
217 | } |
218 | |
219 | // Reserve the space |
220 | size_t vs_byte_size = vs_word_size * BytesPerWord; |
221 | assert_is_aligned(vs_byte_size, Metaspace::reserve_alignment()); |
222 | |
223 | // Allocate the meta virtual space and initialize it. |
224 | VirtualSpaceNode* new_entry = new VirtualSpaceNode(is_class(), vs_byte_size); |
225 | if (!new_entry->initialize()) { |
226 | delete new_entry; |
227 | return false; |
  } else {
    assert(new_entry->reserved_words() == vs_word_size,
           "Reserved memory size differs from requested memory size");
    expand_envelope_to_include_node(new_entry);
    // ensure lock-free iteration sees fully initialized node
    OrderAccess::storestore();
    link_vs(new_entry);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_vsnodes_created));
    DEBUG_ONLY(verify(false);)
    return true;
  }
}
242 | |
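// Append a fully initialized node to the list, make it the current node and
// update the reserved/committed accounting.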
243 | void VirtualSpaceList::link_vs(VirtualSpaceNode* new_entry) { |
244 | if (virtual_space_list() == NULL) { |
245 | set_virtual_space_list(new_entry); |
246 | } else { |
247 | current_virtual_space()->set_next(new_entry); |
248 | } |
249 | set_current_virtual_space(new_entry); |
250 | inc_reserved_words(new_entry->reserved_words()); |
251 | inc_committed_words(new_entry->committed_words()); |
252 | inc_virtual_space_count(); |
253 | #ifdef ASSERT |
254 | new_entry->mangle(); |
255 | #endif |
256 | LogTarget(Trace, gc, metaspace) lt; |
257 | if (lt.is_enabled()) { |
258 | LogStream ls(lt); |
259 | VirtualSpaceNode* vsl = current_virtual_space(); |
260 | ResourceMark rm; |
261 | vsl->print_on(&ls); |
262 | } |
263 | } |
264 | |
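// Commit more memory in the given node: at least min_words, at most
// preferred_words. Returns true on success; the list's committed counter is
// updated by the number of words actually committed.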
265 | bool VirtualSpaceList::expand_node_by(VirtualSpaceNode* node, |
266 | size_t min_words, |
267 | size_t preferred_words) { |
268 | size_t before = node->committed_words(); |
269 | |
270 | bool result = node->expand_by(min_words, preferred_words); |
271 | |
272 | size_t after = node->committed_words(); |
273 | |
274 | // after and before can be the same if the memory was pre-committed. |
  assert(after >= before, "Inconsistency");
276 | inc_committed_words(after - before); |
277 | |
278 | return result; |
279 | } |
280 | |
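// Expand the list by committing more memory: first try to expand the current
// node; if that fails, retire it, reserve a new node and commit there (unless
// the new node is pre-committed). Both sizes must be commit-aligned.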
281 | bool VirtualSpaceList::expand_by(size_t min_words, size_t preferred_words) { |
282 | assert_is_aligned(min_words, Metaspace::commit_alignment_words()); |
283 | assert_is_aligned(preferred_words, Metaspace::commit_alignment_words()); |
  assert(min_words <= preferred_words, "Invalid arguments");
285 | |
  const char* const class_or_not = (is_class() ? "class" : "non-class");
287 | |
288 | if (!MetaspaceGC::can_expand(min_words, this->is_class())) { |
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list.",
                                       class_or_not);
291 | return false; |
292 | } |
293 | |
294 | size_t allowed_expansion_words = MetaspaceGC::allowed_expansion(); |
295 | if (allowed_expansion_words < min_words) { |
    log_trace(gc, metaspace, freelist)("Cannot expand %s virtual space list (must try gc first).",
                                       class_or_not);
298 | return false; |
299 | } |
300 | |
301 | size_t max_expansion_words = MIN2(preferred_words, allowed_expansion_words); |
302 | |
  // Commit more memory from the current virtual space.
304 | bool vs_expanded = expand_node_by(current_virtual_space(), |
305 | min_words, |
306 | max_expansion_words); |
307 | if (vs_expanded) { |
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list.",
                                       class_or_not);
310 | return true; |
311 | } |
  log_trace(gc, metaspace, freelist)("%s virtual space list: retire current node.",
                                     class_or_not);
314 | retire_current_virtual_space(); |
315 | |
316 | // Get another virtual space. |
317 | size_t grow_vs_words = MAX2((size_t)VirtualSpaceSize, preferred_words); |
318 | grow_vs_words = align_up(grow_vs_words, Metaspace::reserve_alignment_words()); |
319 | |
320 | if (create_new_virtual_space(grow_vs_words)) { |
321 | if (current_virtual_space()->is_pre_committed()) { |
322 | // The memory was pre-committed, so we are done here. |
      assert(min_words <= current_virtual_space()->committed_words(),
             "The new VirtualSpace was pre-committed, so it"
             " should be large enough to fit the alloc request.");
326 | return true; |
327 | } |
328 | |
329 | return expand_node_by(current_virtual_space(), |
330 | min_words, |
331 | max_expansion_words); |
332 | } |
333 | |
334 | return false; |
335 | } |
336 | |
// Given a chunk size, calculate the largest possible padding space that
// could be required when allocating a chunk of that size.
339 | static size_t largest_possible_padding_size_for_chunk(size_t chunk_word_size, bool is_class) { |
340 | const ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class); |
341 | if (chunk_type != HumongousIndex) { |
342 | // Normal, non-humongous chunks are allocated at chunk size |
343 | // boundaries, so the largest padding space required would be that |
344 | // minus the smallest chunk size. |
345 | const size_t smallest_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk; |
346 | return chunk_word_size - smallest_chunk_size; |
347 | } else { |
348 | // Humongous chunks are allocated at smallest-chunksize |
349 | // boundaries, so there is no padding required. |
350 | return 0; |
351 | } |
352 | } |
353 | |
354 | |
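// Allocate a chunk of chunk_word_size words from the current node, expanding
// the list first if necessary. Returns NULL if the expansion failed.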
355 | Metachunk* VirtualSpaceList::get_new_chunk(size_t chunk_word_size, size_t suggested_commit_granularity) { |
356 | |
357 | // Allocate a chunk out of the current virtual space. |
358 | Metachunk* next = current_virtual_space()->get_chunk_vs(chunk_word_size); |
359 | |
360 | if (next != NULL) { |
361 | return next; |
362 | } |
363 | |
364 | // The expand amount is currently only determined by the requested sizes |
365 | // and not how much committed memory is left in the current virtual space. |
366 | |
  // We must have enough space for the requested size and any
  // additional required padding chunks.
369 | const size_t size_for_padding = largest_possible_padding_size_for_chunk(chunk_word_size, this->is_class()); |
370 | |
371 | size_t min_word_size = align_up(chunk_word_size + size_for_padding, Metaspace::commit_alignment_words()); |
372 | size_t preferred_word_size = align_up(suggested_commit_granularity, Metaspace::commit_alignment_words()); |
373 | if (min_word_size >= preferred_word_size) { |
374 | // Can happen when humongous chunks are allocated. |
375 | preferred_word_size = min_word_size; |
376 | } |
377 | |
378 | bool expanded = expand_by(min_word_size, preferred_word_size); |
379 | if (expanded) { |
380 | next = current_virtual_space()->get_chunk_vs(chunk_word_size); |
    assert(next != NULL, "The allocation was expected to succeed after the expansion");
382 | } |
383 | |
384 | return next; |
385 | } |
386 | |
387 | void VirtualSpaceList::print_on(outputStream* st, size_t scale) const { |
388 | st->print_cr(SIZE_FORMAT " nodes, current node: " PTR_FORMAT, |
389 | _virtual_space_count, p2i(_current_virtual_space)); |
390 | VirtualSpaceListIterator iter(virtual_space_list()); |
391 | while (iter.repeat()) { |
392 | st->cr(); |
393 | VirtualSpaceNode* node = iter.get_next(); |
394 | node->print_on(st, scale); |
395 | } |
396 | } |
397 | |
398 | void VirtualSpaceList::print_map(outputStream* st) const { |
399 | VirtualSpaceNode* list = virtual_space_list(); |
400 | VirtualSpaceListIterator iter(list); |
401 | unsigned i = 0; |
  while (iter.repeat()) {
    st->print_cr("Node %u:", i);
    VirtualSpaceNode* node = iter.get_next();
    node->print_map(st, this->is_class());
    i++;
  }
408 | } |
409 | |
// Given a node, expand the envelope range such that it includes the node.
411 | void VirtualSpaceList::expand_envelope_to_include_node(const VirtualSpaceNode* node) { |
412 | _envelope_lo = MIN2(_envelope_lo, (address)node->low_boundary()); |
413 | _envelope_hi = MAX2(_envelope_hi, (address)node->high_boundary()); |
414 | } |
415 | |
416 | |
417 | #ifdef ASSERT |
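// Check that the per-node sums match the list's counters and that every node
// lies within the envelope [_envelope_lo, _envelope_hi).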
418 | void VirtualSpaceList::verify(bool slow) { |
419 | VirtualSpaceNode* list = virtual_space_list(); |
420 | VirtualSpaceListIterator iter(list); |
421 | size_t reserved = 0; |
422 | size_t committed = 0; |
423 | size_t node_count = 0; |
424 | while (iter.repeat()) { |
425 | VirtualSpaceNode* node = iter.get_next(); |
426 | if (slow) { |
427 | node->verify(true); |
428 | } |
429 | // Check that the node resides fully within our envelope. |
    assert((address)node->low_boundary() >= _envelope_lo && (address)node->high_boundary() <= _envelope_hi,
           "Node " SIZE_FORMAT " [" PTR_FORMAT ", " PTR_FORMAT ") outside envelope [" PTR_FORMAT ", " PTR_FORMAT ").",
           node_count, p2i(node->low_boundary()), p2i(node->high_boundary()), p2i(_envelope_lo), p2i(_envelope_hi));
433 | reserved += node->reserved_words(); |
434 | committed += node->committed_words(); |
    node_count++;
436 | } |
  assert(reserved == reserved_words() && committed == committed_words() && node_count == _virtual_space_count,
         "Mismatch: reserved real: " SIZE_FORMAT " expected: " SIZE_FORMAT
         ", committed real: " SIZE_FORMAT " expected: " SIZE_FORMAT
         ", node count real: " SIZE_FORMAT " expected: " SIZE_FORMAT ".",
         reserved, reserved_words(), committed, committed_words(),
         node_count, _virtual_space_count);
443 | } |
444 | #endif // ASSERT |
445 | |
446 | } // namespace metaspace |
447 | |
448 | |