/*
 * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/filemap.hpp"
#include "memory/metaspace.hpp"
#include "memory/metaspace/chunkManager.hpp"
#include "memory/metaspace/metachunk.hpp"
#include "memory/metaspace/metaspaceCommon.hpp"
#include "memory/metaspace/printCLDMetaspaceInfoClosure.hpp"
#include "memory/metaspace/spaceManager.hpp"
#include "memory/metaspace/virtualSpaceList.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/metaspaceTracer.hpp"
#include "memory/universe.hpp"
#include "oops/compressedOops.hpp"
#include "runtime/init.hpp"
#include "runtime/orderAccess.hpp"
#include "services/memTracker.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/formatBuffer.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/vmError.hpp"


using namespace metaspace;

MetaWord* last_allocated = 0;

size_t Metaspace::_compressed_class_space_size;
const MetaspaceTracer* Metaspace::_tracer = NULL;

DEBUG_ONLY(bool Metaspace::_frozen = false;)

static const char* space_type_name(Metaspace::MetaspaceType t) {
  const char* s = NULL;
  switch (t) {
    case Metaspace::StandardMetaspaceType: s = "Standard"; break;
    case Metaspace::BootMetaspaceType: s = "Boot"; break;
    case Metaspace::UnsafeAnonymousMetaspaceType: s = "UnsafeAnonymous"; break;
    case Metaspace::ReflectionMetaspaceType: s = "Reflection"; break;
    default: ShouldNotReachHere();
  }
  return s;
}

volatile size_t MetaspaceGC::_capacity_until_GC = 0;
uint MetaspaceGC::_shrink_factor = 0;
bool MetaspaceGC::_should_concurrent_collect = false;

// BlockFreelist methods

// VirtualSpaceNode methods

// MetaspaceGC methods

// VM_CollectForMetadataAllocation is the vm operation used to GC.
// Within the VM operation after the GC the attempt to allocate the metadata
// should succeed.  If the GC did not free enough space for the metaspace
// allocation, the HWM is increased so that another virtualspace will be
// allocated for the metadata.  With perm gen the increase in the perm
// gen had bounds, MinMetaspaceExpansion and MaxMetaspaceExpansion.  The
// metaspace policy uses those as the small and large steps for the HWM.
//
// After the GC the compute_new_size() for MetaspaceGC is called to
// resize the capacity of the metaspaces.  The current implementation
// is based on the flags MinMetaspaceFreeRatio and MaxMetaspaceFreeRatio used
// to resize the Java heap by some GC's.  New flags can be implemented
// if really needed.  MinMetaspaceFreeRatio is used to calculate how much
// free space is desirable in the metaspace capacity to decide how much
// to increase the HWM.  MaxMetaspaceFreeRatio is used to decide how much
// free space is desirable in the metaspace capacity before decreasing
// the HWM.

// Calculate the amount to increase the high water mark (HWM).
// Increase by a minimum amount (MinMetaspaceExpansion) so that
// another expansion is not requested too soon.  If that is not
// enough to satisfy the allocation, increase by MaxMetaspaceExpansion.
// If that is still not enough, expand by the size of the allocation
// plus some.
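//
// A worked example (hypothetical values, not platform defaults): with
// MinMetaspaceExpansion = 256K, MaxMetaspaceExpansion = 4M and a commit
// alignment of 64K,
//   - a 100K request aligns up to 128K, which is <= 256K  -> delta = 256K
//   - a 1M request aligns up to 1M, 256K < 1M <= 4M       -> delta = 4M
//   - an 8M request aligns up to 8M, which is > 4M        -> delta = 8M + 256K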
size_t MetaspaceGC::delta_capacity_until_GC(size_t bytes) {
  size_t min_delta = MinMetaspaceExpansion;
  size_t max_delta = MaxMetaspaceExpansion;
  size_t delta = align_up(bytes, Metaspace::commit_alignment());

  if (delta <= min_delta) {
    delta = min_delta;
  } else if (delta <= max_delta) {
    // Don't want to hit the high water mark on the next
    // allocation so make the delta greater than just enough
    // for this allocation.
    delta = max_delta;
  } else {
    // This allocation is large but the next ones are probably not
    // so increase by the minimum.
    delta = delta + min_delta;
  }

  assert_is_aligned(delta, Metaspace::commit_alignment());

  return delta;
}

size_t MetaspaceGC::capacity_until_GC() {
  size_t value = OrderAccess::load_acquire(&_capacity_until_GC);
  assert(value >= MetaspaceSize, "Not initialized properly?");
  return value;
}
// Try to increase the _capacity_until_GC limit counter by v bytes.
// Returns true if it succeeded. It may fail if either another thread
// concurrently increased the limit or the new limit would be larger
// than MaxMetaspaceSize.
// On success, optionally returns new and old metaspace capacity in
// new_cap_until_GC and old_cap_until_GC respectively.
// On error, optionally sets can_retry to indicate whether there is
// actually enough space remaining to satisfy the request.
bool MetaspaceGC::inc_capacity_until_GC(size_t v, size_t* new_cap_until_GC, size_t* old_cap_until_GC, bool* can_retry) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  size_t old_capacity_until_GC = _capacity_until_GC;
  size_t new_value = old_capacity_until_GC + v;

  if (new_value < old_capacity_until_GC) {
    // The addition wrapped around, set new_value to aligned max value.
    new_value = align_down(max_uintx, Metaspace::commit_alignment());
  }

  if (new_value > MaxMetaspaceSize) {
    if (can_retry != NULL) {
      *can_retry = false;
    }
    return false;
  }

  if (can_retry != NULL) {
    *can_retry = true;
  }
  size_t prev_value = Atomic::cmpxchg(new_value, &_capacity_until_GC, old_capacity_until_GC);

  if (old_capacity_until_GC != prev_value) {
    return false;
  }

  if (new_cap_until_GC != NULL) {
    *new_cap_until_GC = new_value;
  }
  if (old_cap_until_GC != NULL) {
    *old_cap_until_GC = old_capacity_until_GC;
  }
  return true;
}
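
// A minimal caller sketch (hypothetical, for illustration only; actual callers
// live elsewhere in the VM): because a concurrent increment can make the
// cmpxchg in inc_capacity_until_GC() fail spuriously, a caller that needs the
// capacity would typically retry for as long as can_retry remains true:
//
//   size_t delta = MetaspaceGC::delta_capacity_until_GC(bytes);
//   bool can_retry = true;
//   while (!MetaspaceGC::inc_capacity_until_GC(delta, NULL, NULL, &can_retry)) {
//     if (!can_retry) {
//       break;  // Would exceed MaxMetaspaceSize; give up and let a GC run.
//     }
//   }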

size_t MetaspaceGC::dec_capacity_until_GC(size_t v) {
  assert_is_aligned(v, Metaspace::commit_alignment());

  return Atomic::sub(v, &_capacity_until_GC);
}
void MetaspaceGC::initialize() {
  // Set the high-water mark to MaxMetaspaceSize during VM initialization since
  // we can't do a GC during initialization.
  _capacity_until_GC = MaxMetaspaceSize;
}

void MetaspaceGC::post_initialize() {
  // Reset the high-water mark once the VM initialization is done.
  _capacity_until_GC = MAX2(MetaspaceUtils::committed_bytes(), MetaspaceSize);
}

bool MetaspaceGC::can_expand(size_t word_size, bool is_class) {
  // Check if the compressed class space is full.
  if (is_class && Metaspace::using_class_space()) {
    size_t class_committed = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
    if (class_committed + word_size * BytesPerWord > CompressedClassSpaceSize) {
      log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (CompressedClassSpaceSize = " SIZE_FORMAT " words)",
                (is_class ? "class" : "non-class"), word_size, CompressedClassSpaceSize / sizeof(MetaWord));
      return false;
    }
  }

  // Check if the user has imposed a limit on the metaspace memory.
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  if (committed_bytes + word_size * BytesPerWord > MaxMetaspaceSize) {
    log_trace(gc, metaspace, freelist)("Cannot expand %s metaspace by " SIZE_FORMAT " words (MaxMetaspaceSize = " SIZE_FORMAT " words)",
              (is_class ? "class" : "non-class"), word_size, MaxMetaspaceSize / sizeof(MetaWord));
    return false;
  }

  return true;
}

size_t MetaspaceGC::allowed_expansion() {
  size_t committed_bytes = MetaspaceUtils::committed_bytes();
  size_t capacity_until_gc = capacity_until_GC();

  assert(capacity_until_gc >= committed_bytes,
         "capacity_until_gc: " SIZE_FORMAT " < committed_bytes: " SIZE_FORMAT,
         capacity_until_gc, committed_bytes);

  size_t left_until_max = MaxMetaspaceSize - committed_bytes;
  size_t left_until_GC = capacity_until_gc - committed_bytes;
  size_t left_to_commit = MIN2(left_until_GC, left_until_max);
  log_trace(gc, metaspace, freelist)("allowed expansion words: " SIZE_FORMAT
            " (left_until_max: " SIZE_FORMAT ", left_until_GC: " SIZE_FORMAT ").",
            left_to_commit / BytesPerWord, left_until_max / BytesPerWord, left_until_GC / BytesPerWord);

  return left_to_commit / BytesPerWord;
}
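
// A worked example for allowed_expansion() (hypothetical numbers): with
// MaxMetaspaceSize = 256M, capacity_until_GC() = 40M and committed_bytes() = 30M,
// left_until_max is 226M and left_until_GC is 10M, so expansion is capped at
// MIN2(10M, 226M) = 10M, returned in words. The GC threshold, not the hard
// limit, is usually what throttles expansion.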

void MetaspaceGC::compute_new_size() {
  assert(_shrink_factor <= 100, "invalid shrink factor");
  uint current_shrink_factor = _shrink_factor;
  _shrink_factor = 0;

  // Using committed_bytes() for used_after_gc is an overestimation, since the
  // chunk free lists are included in committed_bytes() and the memory in an
  // un-fragmented chunk free list is available for future allocations.
  // However, if the chunk free lists become fragmented, then the memory may
  // not be available for future allocations and the memory is therefore "in use".
  // Including the chunk free lists in the definition of "in use" is therefore
  // necessary. Not including the chunk free lists can cause capacity_until_GC to
  // shrink below committed_bytes() and this has caused serious bugs in the past.
  const size_t used_after_gc = MetaspaceUtils::committed_bytes();
  const size_t capacity_until_GC = MetaspaceGC::capacity_until_GC();

  const double minimum_free_percentage = MinMetaspaceFreeRatio / 100.0;
  const double maximum_used_percentage = 1.0 - minimum_free_percentage;

  const double min_tmp = used_after_gc / maximum_used_percentage;
  size_t minimum_desired_capacity =
    (size_t)MIN2(min_tmp, double(MaxMetaspaceSize));
  // Don't shrink below the initial metaspace size.
  minimum_desired_capacity = MAX2(minimum_desired_capacity,
                                  MetaspaceSize);
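
  // A worked example (hypothetical numbers): with used_after_gc = 60M and
  // MinMetaspaceFreeRatio = 40, maximum_used_percentage is 0.6 and
  // minimum_desired_capacity = 60M / 0.6 = 100M, i.e. the HWM stays high
  // enough that at least 40% of that capacity would be free.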

  log_trace(gc, metaspace)("MetaspaceGC::compute_new_size: ");
  log_trace(gc, metaspace)("    minimum_free_percentage: %6.2f  maximum_used_percentage: %6.2f",
                           minimum_free_percentage, maximum_used_percentage);
  log_trace(gc, metaspace)("    used_after_gc: %6.1fKB", used_after_gc / (double) K);


  size_t shrink_bytes = 0;
  if (capacity_until_GC < minimum_desired_capacity) {
    // If we have less capacity below the metaspace HWM, then
    // increment the HWM.
    size_t expand_bytes = minimum_desired_capacity - capacity_until_GC;
    expand_bytes = align_up(expand_bytes, Metaspace::commit_alignment());
    // Don't expand unless it's significant
    if (expand_bytes >= MinMetaspaceExpansion) {
      size_t new_capacity_until_GC = 0;
      bool succeeded = MetaspaceGC::inc_capacity_until_GC(expand_bytes, &new_capacity_until_GC);
      assert(succeeded, "Should always successfully increment HWM when at safepoint");

      Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                               new_capacity_until_GC,
                                               MetaspaceGCThresholdUpdater::ComputeNewSize);
      log_trace(gc, metaspace)("    expanding:  minimum_desired_capacity: %6.1fKB  expand_bytes: %6.1fKB  MinMetaspaceExpansion: %6.1fKB  new metaspace HWM: %6.1fKB",
                               minimum_desired_capacity / (double) K,
                               expand_bytes / (double) K,
                               MinMetaspaceExpansion / (double) K,
                               new_capacity_until_GC / (double) K);
    }
    return;
  }

  // No expansion, now see if we want to shrink
  // We would never want to shrink more than this
  assert(capacity_until_GC >= minimum_desired_capacity,
         SIZE_FORMAT " >= " SIZE_FORMAT,
         capacity_until_GC, minimum_desired_capacity);
  size_t max_shrink_bytes = capacity_until_GC - minimum_desired_capacity;

  // Should shrinking be considered?
  if (MaxMetaspaceFreeRatio < 100) {
    const double maximum_free_percentage = MaxMetaspaceFreeRatio / 100.0;
    const double minimum_used_percentage = 1.0 - maximum_free_percentage;
    const double max_tmp = used_after_gc / minimum_used_percentage;
    size_t maximum_desired_capacity = (size_t)MIN2(max_tmp, double(MaxMetaspaceSize));
    maximum_desired_capacity = MAX2(maximum_desired_capacity,
                                    MetaspaceSize);
    log_trace(gc, metaspace)("    maximum_free_percentage: %6.2f  minimum_used_percentage: %6.2f",
                             maximum_free_percentage, minimum_used_percentage);
    log_trace(gc, metaspace)("    minimum_desired_capacity: %6.1fKB  maximum_desired_capacity: %6.1fKB",
                             minimum_desired_capacity / (double) K, maximum_desired_capacity / (double) K);

    assert(minimum_desired_capacity <= maximum_desired_capacity,
           "sanity check");

    if (capacity_until_GC > maximum_desired_capacity) {
      // Capacity too large, compute shrinking size
      shrink_bytes = capacity_until_GC - maximum_desired_capacity;
      // We don't want to shrink all the way back to initSize if people call
      // System.gc(), because some programs do that between "phases" and then
      // we'd just have to grow the heap up again for the next phase.  So we
      // damp the shrinking: 0% on the first call, 10% on the second call, 40%
      // on the third call, and 100% by the fourth call.  But if we recompute
      // size without shrinking, it goes back to 0%.
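      //
      // A worked example (hypothetical numbers): with a constant 100M excess
      // over maximum_desired_capacity, successive shrink-eligible calls would
      // remove 0M, 10M, 40M and then 100M of the (recomputed) excess, as
      // _shrink_factor steps 0 -> 10 -> 40 -> 100.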
      shrink_bytes = shrink_bytes / 100 * current_shrink_factor;

      shrink_bytes = align_down(shrink_bytes, Metaspace::commit_alignment());

      assert(shrink_bytes <= max_shrink_bytes,
             "invalid shrink size " SIZE_FORMAT " not <= " SIZE_FORMAT,
             shrink_bytes, max_shrink_bytes);
      if (current_shrink_factor == 0) {
        _shrink_factor = 10;
      } else {
        _shrink_factor = MIN2(current_shrink_factor * 4, (uint) 100);
      }
      log_trace(gc, metaspace)("    shrinking:  initThreshold: %.1fK  maximum_desired_capacity: %.1fK",
                               MetaspaceSize / (double) K, maximum_desired_capacity / (double) K);
      log_trace(gc, metaspace)("    shrink_bytes: %.1fK  current_shrink_factor: %d  new shrink factor: %d  MinMetaspaceExpansion: %.1fK",
                               shrink_bytes / (double) K, current_shrink_factor, _shrink_factor, MinMetaspaceExpansion / (double) K);
    }
  }

  // Don't shrink unless it's significant
  if (shrink_bytes >= MinMetaspaceExpansion &&
      ((capacity_until_GC - shrink_bytes) >= MetaspaceSize)) {
    size_t new_capacity_until_GC = MetaspaceGC::dec_capacity_until_GC(shrink_bytes);
    Metaspace::tracer()->report_gc_threshold(capacity_until_GC,
                                             new_capacity_until_GC,
                                             MetaspaceGCThresholdUpdater::ComputeNewSize);
  }
}

// MetaspaceUtils
size_t MetaspaceUtils::_capacity_words[Metaspace::MetadataTypeCount] = {0, 0};
size_t MetaspaceUtils::_overhead_words[Metaspace::MetadataTypeCount] = {0, 0};
volatile size_t MetaspaceUtils::_used_words[Metaspace::MetadataTypeCount] = {0, 0};

// Collect used metaspace statistics. This involves walking the CLDG. The resulting
// output will be the accumulated values for all live metaspaces.
// Note: method does not do any locking.
void MetaspaceUtils::collect_statistics(ClassLoaderMetaspaceStatistics* out) {
  out->reset();
  ClassLoaderDataGraphMetaspaceIterator iter;
  while (iter.repeat()) {
    ClassLoaderMetaspace* msp = iter.get_next();
    if (msp != NULL) {
      msp->add_to_statistics(out);
    }
  }
}

size_t MetaspaceUtils::free_in_vs_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->free_bytes();
}

size_t MetaspaceUtils::free_in_vs_bytes() {
  return free_in_vs_bytes(Metaspace::ClassType) + free_in_vs_bytes(Metaspace::NonClassType);
}
static void inc_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  (*pstat) += words;
}

static void dec_stat_nonatomically(size_t* pstat, size_t words) {
  assert_lock_strong(MetaspaceExpand_lock);
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  *pstat = size_now - words;
}

static void inc_stat_atomically(volatile size_t* pstat, size_t words) {
  Atomic::add(words, pstat);
}

static void dec_stat_atomically(volatile size_t* pstat, size_t words) {
  const size_t size_now = *pstat;
  assert(size_now >= words, "About to decrement counter below zero "
         "(current value: " SIZE_FORMAT ", decrement value: " SIZE_FORMAT ").",
         size_now, words);
  Atomic::sub(words, pstat);
}

void MetaspaceUtils::dec_capacity(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::inc_capacity(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_capacity_words[mdtype], words);
}
void MetaspaceUtils::dec_used(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::inc_used(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_atomically(&_used_words[mdtype], words);
}
void MetaspaceUtils::dec_overhead(Metaspace::MetadataType mdtype, size_t words) {
  dec_stat_nonatomically(&_overhead_words[mdtype], words);
}
void MetaspaceUtils::inc_overhead(Metaspace::MetadataType mdtype, size_t words) {
  inc_stat_nonatomically(&_overhead_words[mdtype], words);
}

size_t MetaspaceUtils::reserved_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->reserved_bytes();
}

size_t MetaspaceUtils::committed_bytes(Metaspace::MetadataType mdtype) {
  VirtualSpaceList* list = Metaspace::get_space_list(mdtype);
  return list == NULL ? 0 : list->committed_bytes();
}

size_t MetaspaceUtils::min_chunk_size_words() { return Metaspace::first_chunk_word_size(); }

size_t MetaspaceUtils::free_chunks_total_words(Metaspace::MetadataType mdtype) {
  ChunkManager* chunk_manager = Metaspace::get_chunk_manager(mdtype);
  if (chunk_manager == NULL) {
    return 0;
  }
  return chunk_manager->free_chunks_total_words();
}

size_t MetaspaceUtils::free_chunks_total_bytes(Metaspace::MetadataType mdtype) {
  return free_chunks_total_words(mdtype) * BytesPerWord;
}

size_t MetaspaceUtils::free_chunks_total_words() {
  return free_chunks_total_words(Metaspace::ClassType) +
         free_chunks_total_words(Metaspace::NonClassType);
}

size_t MetaspaceUtils::free_chunks_total_bytes() {
  return free_chunks_total_words() * BytesPerWord;
}

bool MetaspaceUtils::has_chunk_free_list(Metaspace::MetadataType mdtype) {
  return Metaspace::get_chunk_manager(mdtype) != NULL;
}

MetaspaceChunkFreeListSummary MetaspaceUtils::chunk_free_list_summary(Metaspace::MetadataType mdtype) {
  if (!has_chunk_free_list(mdtype)) {
    return MetaspaceChunkFreeListSummary();
  }

  const ChunkManager* cm = Metaspace::get_chunk_manager(mdtype);
  return cm->chunk_free_list_summary();
}

void MetaspaceUtils::print_metaspace_change(size_t prev_metadata_used) {
  log_info(gc, metaspace)("Metaspace: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
                          prev_metadata_used/K, used_bytes()/K, reserved_bytes()/K);
}
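
// Example of the resulting log line (values are illustrative only):
//   [info][gc,metaspace] Metaspace: 26116K->26116K(278528K)
// i.e. used before the GC -> used after the GC, with the reserved size
// in parentheses.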

void MetaspaceUtils::print_on(outputStream* out) {
  out->print_cr(" Metaspace       "
                "used "      SIZE_FORMAT "K, "
                "capacity "  SIZE_FORMAT "K, "
                "committed " SIZE_FORMAT "K, "
                "reserved "  SIZE_FORMAT "K",
                used_bytes()/K,
                capacity_bytes()/K,
                committed_bytes()/K,
                reserved_bytes()/K);

  if (Metaspace::using_class_space()) {
    Metaspace::MetadataType ct = Metaspace::ClassType;
    out->print_cr("  class space    "
                  "used "      SIZE_FORMAT "K, "
                  "capacity "  SIZE_FORMAT "K, "
                  "committed " SIZE_FORMAT "K, "
                  "reserved "  SIZE_FORMAT "K",
                  used_bytes(ct)/K,
                  capacity_bytes(ct)/K,
                  committed_bytes(ct)/K,
                  reserved_bytes(ct)/K);
  }
}


void MetaspaceUtils::print_vs(outputStream* out, size_t scale) {
  const size_t reserved_nonclass_words = reserved_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  const size_t committed_nonclass_words = committed_bytes(Metaspace::NonClassType) / sizeof(MetaWord);
  {
    if (Metaspace::using_class_space()) {
      out->print("  Non-class space:  ");
    }
    print_scaled_words(out, reserved_nonclass_words, scale, 7);
    out->print(" reserved, ");
    print_scaled_words_and_percentage(out, committed_nonclass_words, reserved_nonclass_words, scale, 7);
    out->print_cr(" committed ");

    if (Metaspace::using_class_space()) {
      const size_t reserved_class_words = reserved_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      const size_t committed_class_words = committed_bytes(Metaspace::ClassType) / sizeof(MetaWord);
      out->print("      Class space:  ");
      print_scaled_words(out, reserved_class_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_class_words, reserved_class_words, scale, 7);
      out->print_cr(" committed ");

      const size_t reserved_words = reserved_nonclass_words + reserved_class_words;
      const size_t committed_words = committed_nonclass_words + committed_class_words;
      out->print("             Both:  ");
      print_scaled_words(out, reserved_words, scale, 7);
      out->print(" reserved, ");
      print_scaled_words_and_percentage(out, committed_words, reserved_words, scale, 7);
      out->print_cr(" committed ");
    }
  }
}

static void print_basic_switches(outputStream* out, size_t scale) {
  out->print("MaxMetaspaceSize: ");
  if (MaxMetaspaceSize >= (max_uintx) - (2 * os::vm_page_size())) {
    // aka "very big". Default is max_uintx, but due to rounding in arg parsing the real
    // value is smaller.
    out->print("unlimited");
  } else {
    print_human_readable_size(out, MaxMetaspaceSize, scale);
  }
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("CompressedClassSpaceSize: ");
    print_human_readable_size(out, CompressedClassSpaceSize, scale);
  }
  out->cr();
}

// This will print out a basic metaspace usage report but
// unlike print_report() is guaranteed not to lock or to walk the CLDG.
void MetaspaceUtils::print_basic_report(outputStream* out, size_t scale) {

  out->cr();
  out->print_cr("Usage:");

  if (Metaspace::using_class_space()) {
    out->print("  Non-class:  ");
  }

  // In its most basic form, we do not require walking the CLDG. Instead, just print the running totals from
  // MetaspaceUtils.
  const size_t cap_nc = MetaspaceUtils::capacity_words(Metaspace::NonClassType);
  const size_t overhead_nc = MetaspaceUtils::overhead_words(Metaspace::NonClassType);
  const size_t used_nc = MetaspaceUtils::used_words(Metaspace::NonClassType);
  const size_t free_and_waste_nc = cap_nc - overhead_nc - used_nc;

  print_scaled_words(out, cap_nc, scale, 5);
  out->print(" capacity, ");
  print_scaled_words_and_percentage(out, used_nc, cap_nc, scale, 5);
  out->print(" used, ");
  print_scaled_words_and_percentage(out, free_and_waste_nc, cap_nc, scale, 5);
  out->print(" free+waste, ");
  print_scaled_words_and_percentage(out, overhead_nc, cap_nc, scale, 5);
  out->print(" overhead. ");
  out->cr();

  if (Metaspace::using_class_space()) {
    const size_t cap_c = MetaspaceUtils::capacity_words(Metaspace::ClassType);
    const size_t overhead_c = MetaspaceUtils::overhead_words(Metaspace::ClassType);
    const size_t used_c = MetaspaceUtils::used_words(Metaspace::ClassType);
    const size_t free_and_waste_c = cap_c - overhead_c - used_c;
    out->print("      Class:  ");
    print_scaled_words(out, cap_c, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_c, cap_c, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_c, cap_c, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_c, cap_c, scale, 5);
    out->print(" overhead. ");
    out->cr();

    out->print("       Both:  ");
    const size_t cap = cap_nc + cap_c;

    print_scaled_words(out, cap, scale, 5);
    out->print(" capacity, ");
    print_scaled_words_and_percentage(out, used_nc + used_c, cap, scale, 5);
    out->print(" used, ");
    print_scaled_words_and_percentage(out, free_and_waste_nc + free_and_waste_c, cap, scale, 5);
    out->print(" free+waste, ");
    print_scaled_words_and_percentage(out, overhead_nc + overhead_c, cap, scale, 5);
    out->print(" overhead. ");
    out->cr();
  }

  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  out->cr();
  out->print_cr("Chunk freelists:");

  if (Metaspace::using_class_space()) {
    out->print("   Non-Class:  ");
  }
  print_human_readable_size(out, Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
  out->cr();
  if (Metaspace::using_class_space()) {
    out->print("       Class:  ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes(), scale);
    out->cr();
    out->print("        Both:  ");
    print_human_readable_size(out, Metaspace::chunk_manager_class()->free_chunks_total_bytes() +
                              Metaspace::chunk_manager_metadata()->free_chunks_total_bytes(), scale);
    out->cr();
  }

  out->cr();

  // Print basic settings
  print_basic_switches(out, scale);

  out->cr();

}

void MetaspaceUtils::print_report(outputStream* out, size_t scale, int flags) {

  const bool print_loaders = (flags & rf_show_loaders) > 0;
  const bool print_classes = (flags & rf_show_classes) > 0;
  const bool print_by_chunktype = (flags & rf_break_down_by_chunktype) > 0;
  const bool print_by_spacetype = (flags & rf_break_down_by_spacetype) > 0;

  // Some report options require walking the class loader data graph.
  PrintCLDMetaspaceInfoClosure cl(out, scale, print_loaders, print_classes, print_by_chunktype);
  if (print_loaders) {
    out->cr();
    out->print_cr("Usage per loader:");
    out->cr();
  }

  ClassLoaderDataGraph::loaded_cld_do(&cl); // collect data and optionally print

  // Print totals, broken up by space type.
  if (print_by_spacetype) {
    out->cr();
    out->print_cr("Usage per space type:");
    out->cr();
    for (int space_type = (int)Metaspace::ZeroMetaspaceType;
         space_type < (int)Metaspace::MetaspaceTypeCount; space_type++)
    {
      uintx num_loaders = cl._num_loaders_by_spacetype[space_type];
      uintx num_classes = cl._num_classes_by_spacetype[space_type];
      out->print("%s - " UINTX_FORMAT " %s",
                 space_type_name((Metaspace::MetaspaceType)space_type),
                 num_loaders, loaders_plural(num_loaders));
      if (num_classes > 0) {
        out->print(", ");
        print_number_of_classes(out, num_classes, cl._num_classes_shared_by_spacetype[space_type]);
        out->print(":");
        cl._stats_by_spacetype[space_type].print_on(out, scale, print_by_chunktype);
      } else {
        out->print(".");
        out->cr();
      }
      out->cr();
    }
  }

  // Print totals for in-use data:
  out->cr();
  {
    uintx num_loaders = cl._num_loaders;
    out->print("Total Usage - " UINTX_FORMAT " %s, ",
               num_loaders, loaders_plural(num_loaders));
    print_number_of_classes(out, cl._num_classes, cl._num_classes_shared);
    out->print(":");
    cl._stats_total.print_on(out, scale, print_by_chunktype);
    out->cr();
  }

  // -- Print Virtual space.
  out->cr();
  out->print_cr("Virtual space:");

  print_vs(out, scale);

  // -- Print VirtualSpaceList details.
  if ((flags & rf_show_vslist) > 0) {
    out->cr();
    out->print_cr("Virtual space list%s:", Metaspace::using_class_space() ? "s" : "");

    if (Metaspace::using_class_space()) {
      out->print_cr("   Non-Class:");
    }
    Metaspace::space_list()->print_on(out, scale);
    if (Metaspace::using_class_space()) {
      out->print_cr("       Class:");
      Metaspace::class_space_list()->print_on(out, scale);
    }
  }
  out->cr();

  // -- Print VirtualSpaceList map.
  if ((flags & rf_show_vsmap) > 0) {
    out->cr();
    out->print_cr("Virtual space map:");

    if (Metaspace::using_class_space()) {
      out->print_cr("   Non-Class:");
    }
    Metaspace::space_list()->print_map(out);
    if (Metaspace::using_class_space()) {
      out->print_cr("       Class:");
      Metaspace::class_space_list()->print_map(out);
    }
  }
  out->cr();

  // -- Print Freelists (ChunkManager) details
  out->cr();
  out->print_cr("Chunk freelist%s:", Metaspace::using_class_space() ? "s" : "");

  ChunkManagerStatistics non_class_cm_stat;
  Metaspace::chunk_manager_metadata()->collect_statistics(&non_class_cm_stat);

  if (Metaspace::using_class_space()) {
    out->print_cr("   Non-Class:");
  }
  non_class_cm_stat.print_on(out, scale);

  if (Metaspace::using_class_space()) {
    ChunkManagerStatistics class_cm_stat;
    Metaspace::chunk_manager_class()->collect_statistics(&class_cm_stat);
    out->print_cr("       Class:");
    class_cm_stat.print_on(out, scale);
  }
  // As a convenience, print a summary of common waste.
  out->cr();
  out->print("Waste ");
  // For all wastages, print percentages from total. As total use the total size of memory committed for metaspace.
  const size_t committed_words = committed_bytes() / BytesPerWord;

  out->print("(percentages refer to total committed size ");
  print_scaled_words(out, committed_words, scale);
  out->print_cr("):");

  // Print space committed but not yet used by any class loader
  const size_t unused_words_in_vs = MetaspaceUtils::free_in_vs_bytes() / BytesPerWord;
  out->print("              Committed unused: ");
  print_scaled_words_and_percentage(out, unused_words_in_vs, committed_words, scale, 6);
  out->cr();

  // Print waste for in-use chunks.
  UsedChunksStatistics ucs_nonclass = cl._stats_total.nonclass_sm_stats().totals();
  UsedChunksStatistics ucs_class = cl._stats_total.class_sm_stats().totals();
  UsedChunksStatistics ucs_all;
  ucs_all.add(ucs_nonclass);
  ucs_all.add(ucs_class);

  out->print("        Waste in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.waste(), committed_words, scale, 6);
  out->cr();
  out->print("         Free in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.free(), committed_words, scale, 6);
  out->cr();
  out->print("     Overhead in chunks in use: ");
  print_scaled_words_and_percentage(out, ucs_all.overhead(), committed_words, scale, 6);
  out->cr();

  // Print waste in free chunks.
  const size_t total_capacity_in_free_chunks =
    Metaspace::chunk_manager_metadata()->free_chunks_total_words() +
    (Metaspace::using_class_space() ? Metaspace::chunk_manager_class()->free_chunks_total_words() : 0);
  out->print("                In free chunks: ");
  print_scaled_words_and_percentage(out, total_capacity_in_free_chunks, committed_words, scale, 6);
  out->cr();

  // Print waste in deallocated blocks.
  const uintx free_blocks_num =
    cl._stats_total.nonclass_sm_stats().free_blocks_num() +
    cl._stats_total.class_sm_stats().free_blocks_num();
  const size_t free_blocks_cap_words =
    cl._stats_total.nonclass_sm_stats().free_blocks_cap_words() +
    cl._stats_total.class_sm_stats().free_blocks_cap_words();
  out->print("Deallocated from chunks in use: ");
  print_scaled_words_and_percentage(out, free_blocks_cap_words, committed_words, scale, 6);
  out->print(" (" UINTX_FORMAT " blocks)", free_blocks_num);
  out->cr();

  // Print total waste.
  const size_t total_waste = ucs_all.waste() + ucs_all.free() + ucs_all.overhead() + total_capacity_in_free_chunks
                             + free_blocks_cap_words + unused_words_in_vs;
  out->print("                       -total-: ");
  print_scaled_words_and_percentage(out, total_waste, committed_words, scale, 6);
  out->cr();

  // Print internal statistics
#ifdef ASSERT
  out->cr();
  out->cr();
  out->print_cr("Internal statistics:");
  out->cr();
  out->print_cr("Number of allocations: " UINTX_FORMAT ".", g_internal_statistics.num_allocs);
  out->print_cr("Number of space births: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_births);
  out->print_cr("Number of space deaths: " UINTX_FORMAT ".", g_internal_statistics.num_metaspace_deaths);
  out->print_cr("Number of virtual space node births: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_created);
  out->print_cr("Number of virtual space node deaths: " UINTX_FORMAT ".", g_internal_statistics.num_vsnodes_purged);
  out->print_cr("Number of times virtual space nodes were expanded: " UINTX_FORMAT ".", g_internal_statistics.num_committed_space_expanded);
  out->print_cr("Number of deallocations: " UINTX_FORMAT " (" UINTX_FORMAT " external).", g_internal_statistics.num_deallocs, g_internal_statistics.num_external_deallocs);
  out->print_cr("Allocations from deallocated blocks: " UINTX_FORMAT ".", g_internal_statistics.num_allocs_from_deallocated_blocks);
  out->print_cr("Number of chunks added to freelist: " UINTX_FORMAT ".",
                g_internal_statistics.num_chunks_added_to_freelist);
  out->print_cr("Number of chunks removed from freelist: " UINTX_FORMAT ".",
                g_internal_statistics.num_chunks_removed_from_freelist);
  out->print_cr("Number of chunk merges: " UINTX_FORMAT ", split-ups: " UINTX_FORMAT ".",
                g_internal_statistics.num_chunk_merges, g_internal_statistics.num_chunk_splits);

  out->cr();
#endif

  // Print some interesting settings
  out->cr();
  out->cr();
  print_basic_switches(out, scale);

  out->cr();
  out->print("InitialBootClassLoaderMetaspaceSize: ");
  print_human_readable_size(out, InitialBootClassLoaderMetaspaceSize, scale);

  out->cr();
  out->cr();

} // MetaspaceUtils::print_report()

// Prints an ASCII representation of the given space.
void MetaspaceUtils::print_metaspace_map(outputStream* out, Metaspace::MetadataType mdtype) {
  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
  const bool for_class = (mdtype == Metaspace::ClassType);
  VirtualSpaceList* const vsl = for_class ? Metaspace::class_space_list() : Metaspace::space_list();
  if (vsl != NULL) {
    if (for_class) {
      if (!Metaspace::using_class_space()) {
        out->print_cr("No Class Space.");
        return;
      }
      out->print_raw("---- Metaspace Map (Class Space) ----");
    } else {
      out->print_raw("---- Metaspace Map (Non-Class Space) ----");
    }
    // Print legend:
    out->cr();
    out->print_cr("Chunk Types (uppercase chunks are in use): x-specialized, s-small, m-medium, h-humongous.");
    out->cr();
    vsl->print_map(out);
    out->cr();
  }
}

void MetaspaceUtils::verify_free_chunks() {
#ifdef ASSERT
  Metaspace::chunk_manager_metadata()->verify(false);
  if (Metaspace::using_class_space()) {
    Metaspace::chunk_manager_class()->verify(false);
  }
#endif
}

void MetaspaceUtils::verify_metrics() {
#ifdef ASSERT
  // Please note: there are time windows where the internal counters are out of sync with
  // reality. For example, when a newly created ClassLoaderMetaspace creates its first chunk -
  // the ClassLoaderMetaspace is not yet attached to its ClassLoaderData object and hence will
  // not be counted when iterating the CLDG. So be careful when you call this method.
  ClassLoaderMetaspaceStatistics total_stat;
  collect_statistics(&total_stat);
  UsedChunksStatistics nonclass_chunk_stat = total_stat.nonclass_sm_stats().totals();
  UsedChunksStatistics class_chunk_stat = total_stat.class_sm_stats().totals();

  bool mismatch = false;
  for (int i = 0; i < Metaspace::MetadataTypeCount; i++) {
    Metaspace::MetadataType mdtype = (Metaspace::MetadataType)i;
    UsedChunksStatistics chunk_stat = total_stat.sm_stats(mdtype).totals();
    if (capacity_words(mdtype) != chunk_stat.cap() ||
        used_words(mdtype) != chunk_stat.used() ||
        overhead_words(mdtype) != chunk_stat.overhead()) {
      mismatch = true;
      tty->print_cr("MetaspaceUtils::verify_metrics: counter mismatch for mdtype=%u:", mdtype);
      tty->print_cr("Expected cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    capacity_words(mdtype), used_words(mdtype), overhead_words(mdtype));
      tty->print_cr("Got cap " SIZE_FORMAT ", used " SIZE_FORMAT ", overhead " SIZE_FORMAT ".",
                    chunk_stat.cap(), chunk_stat.used(), chunk_stat.overhead());
      tty->flush();
    }
  }
  assert(mismatch == false, "MetaspaceUtils::verify_metrics: counter mismatch.");
#endif
}

// Metaspace methods

size_t Metaspace::_first_chunk_word_size = 0;
size_t Metaspace::_first_class_chunk_word_size = 0;

size_t Metaspace::_commit_alignment = 0;
size_t Metaspace::_reserve_alignment = 0;

VirtualSpaceList* Metaspace::_space_list = NULL;
VirtualSpaceList* Metaspace::_class_space_list = NULL;

ChunkManager* Metaspace::_chunk_manager_metadata = NULL;
ChunkManager* Metaspace::_chunk_manager_class = NULL;

#define VIRTUALSPACEMULTIPLIER 2

#ifdef _LP64
static const uint64_t UnscaledClassSpaceMax = (uint64_t(max_juint) + 1);

void Metaspace::set_narrow_klass_base_and_shift(address metaspace_base, address cds_base) {
  assert(!DumpSharedSpaces, "narrow_klass is set by MetaspaceShared class.");
  // Figure out the narrow_klass_base and the narrow_klass_shift.  The
  // narrow_klass_base is the lower of the metaspace base and the cds base
  // (if cds is enabled).  The narrow_klass_shift depends on the distance
  // between the lower base and higher address.
  address lower_base;
  address higher_address;
#if INCLUDE_CDS
  if (UseSharedSpaces) {
    higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                          (address)(metaspace_base + compressed_class_space_size()));
    lower_base = MIN2(metaspace_base, cds_base);
  } else
#endif
  {
    higher_address = metaspace_base + compressed_class_space_size();
    lower_base = metaspace_base;

    uint64_t klass_encoding_max = UnscaledClassSpaceMax << LogKlassAlignmentInBytes;
    // If compressed class space fits in lower 32G, we don't need a base.
    if (higher_address <= (address)klass_encoding_max) {
      lower_base = 0; // Effectively lower base is zero.
    }
  }

  CompressedKlassPointers::set_base(lower_base);

  // CDS uses LogKlassAlignmentInBytes for narrow_klass_shift. See
  // MetaspaceShared::initialize_dumptime_shared_and_meta_spaces() for
  // how dump time narrow_klass_shift is set. Although, CDS can work
  // with zero-shift mode also, to be consistent with AOT it uses
  // LogKlassAlignmentInBytes for klass shift so archived java heap objects
  // can be used at same time as AOT code.
  if (!UseSharedSpaces
      && (uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax) {
    CompressedKlassPointers::set_shift(0);
  } else {
    CompressedKlassPointers::set_shift(LogKlassAlignmentInBytes);
  }
  AOTLoader::set_narrow_klass_shift();
}
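
// Decoding sketch (for illustration only; the actual decode is implemented in
// CompressedKlassPointers and the code generators): a narrowKlass value nk
// maps back to a Klass* as
//   Klass* k = (Klass*)(base + ((uintptr_t)nk << shift));
// With shift == 0 the encodable range is 4G (UnscaledClassSpaceMax); with
// shift == LogKlassAlignmentInBytes it grows to 32G (KlassEncodingMetaspaceMax).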

#if INCLUDE_CDS
// Return TRUE if the specified metaspace_base and cds_base are close enough
// to work with compressed klass pointers.
bool Metaspace::can_use_cds_with_metaspace_addr(char* metaspace_base, address cds_base) {
  assert(cds_base != 0 && UseSharedSpaces, "Only use with CDS");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  address lower_base = MIN2((address)metaspace_base, cds_base);
  address higher_address = MAX2((address)(cds_base + MetaspaceShared::core_spaces_size()),
                                (address)(metaspace_base + compressed_class_space_size()));
  return ((uint64_t)(higher_address - lower_base) <= UnscaledClassSpaceMax);
}
#endif

// Try to allocate the metaspace at the requested addr.
void Metaspace::allocate_metaspace_compressed_klass_ptrs(char* requested_addr, address cds_base) {
  assert(!DumpSharedSpaces, "compressed klass space is allocated by MetaspaceShared class.");
  assert(using_class_space(), "called improperly");
  assert(UseCompressedClassPointers, "Only use with CompressedKlassPtrs");
  assert(compressed_class_space_size() < KlassEncodingMetaspaceMax,
         "Metaspace size is too big");
  assert_is_aligned(requested_addr, _reserve_alignment);
  assert_is_aligned(cds_base, _reserve_alignment);
  assert_is_aligned(compressed_class_space_size(), _reserve_alignment);

  // Don't use large pages for the class space.
  bool large_pages = false;
#if !(defined(AARCH64) || defined(AIX))
  ReservedSpace metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                             _reserve_alignment,
                                             large_pages,
                                             requested_addr);
#else // AARCH64 || AIX
  ReservedSpace metaspace_rs;

  // Our compressed klass pointers may fit nicely into the lower 32
  // bits.
  if ((uint64_t)requested_addr + compressed_class_space_size() < 4*G) {
    metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                 _reserve_alignment,
                                 large_pages,
                                 requested_addr);
  }

  if (!metaspace_rs.is_reserved()) {
    // Aarch64: Try to align metaspace so that we can decode a compressed
    // klass with a single MOVK instruction. We can do this iff the
    // compressed class base is a multiple of 4G.
    // Aix: Search for a place where we can find memory. If we need to load
    // the base, 4G alignment is helpful, too.
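    //
    // Illustrative sketch of the AArch64 decode this enables (assuming a
    // hypothetical base of 0x8_0000_0000, a 4G multiple, and a shifted
    // narrow klass that fits in the low 32 bits):
    //   lsl  x0, x1, #shift       // x1 holds the narrowKlass value
    //   movk x0, #0x8, lsl #32    // merge in the base with a single MOVK
    // instead of materializing the base and adding it separately.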
    size_t increment = AARCH64_ONLY(4*)G;
    for (char *a = align_up(requested_addr, increment);
         a < (char*)(1024*G);
         a += increment) {
      if (a == (char *)(32*G)) {
        // Go faster from here on. Zero-based is no longer possible.
        increment = 4*G;
      }

#if INCLUDE_CDS
      if (UseSharedSpaces
          && !can_use_cds_with_metaspace_addr(a, cds_base)) {
        // We failed to find an aligned base that will reach.  Fall
        // back to using our requested addr.
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment,
                                     large_pages,
                                     requested_addr);
        break;
      }
#endif

      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment,
                                   large_pages,
                                   a);
      if (metaspace_rs.is_reserved()) {
        break;
      }
    }
  }

#endif // AARCH64 || AIX

  if (!metaspace_rs.is_reserved()) {
#if INCLUDE_CDS
    if (UseSharedSpaces) {
      size_t increment = align_up(1*G, _reserve_alignment);

      // Keep trying to allocate the metaspace, increasing the requested_addr
      // by 1GB each time, until we reach an address that will no longer allow
      // use of CDS with compressed klass pointers.
      char *addr = requested_addr;
      while (!metaspace_rs.is_reserved() && (addr + increment > addr) &&
             can_use_cds_with_metaspace_addr(addr + increment, cds_base)) {
        addr = addr + increment;
        metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                     _reserve_alignment, large_pages, addr);
      }
    }
#endif
    // If no successful allocation then try to allocate the space anywhere.  If
    // that fails then OOM doom.  At this point we cannot try allocating the
    // metaspace as if UseCompressedClassPointers is off because too much
    // initialization has happened that depends on UseCompressedClassPointers.
    // So, UseCompressedClassPointers cannot be turned off at this point.
    if (!metaspace_rs.is_reserved()) {
      metaspace_rs = ReservedSpace(compressed_class_space_size(),
                                   _reserve_alignment, large_pages);
      if (!metaspace_rs.is_reserved()) {
        vm_exit_during_initialization(err_msg("Could not allocate metaspace: " SIZE_FORMAT " bytes",
                                              compressed_class_space_size()));
      }
    }
  }

  // If we got here then the metaspace got allocated.
  MemTracker::record_virtual_memory_type((address)metaspace_rs.base(), mtClass);

#if INCLUDE_CDS
  // Verify that we can use shared spaces.  Otherwise, turn off CDS.
  if (UseSharedSpaces && !can_use_cds_with_metaspace_addr(metaspace_rs.base(), cds_base)) {
    FileMapInfo::stop_sharing_and_unmap(
        "Could not allocate metaspace at a compatible address");
  }
#endif
  set_narrow_klass_base_and_shift((address)metaspace_rs.base(),
                                  UseSharedSpaces ? (address)cds_base : 0);

  initialize_class_space(metaspace_rs);

  LogTarget(Trace, gc, metaspace) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    print_compressed_class_space(&ls, requested_addr);
  }
}

void Metaspace::print_compressed_class_space(outputStream* st, const char* requested_addr) {
  st->print_cr("Narrow klass base: " PTR_FORMAT ", Narrow klass shift: %d",
               p2i(CompressedKlassPointers::base()), CompressedKlassPointers::shift());
  if (_class_space_list != NULL) {
    address base = (address)_class_space_list->current_virtual_space()->bottom();
    st->print("Compressed class space size: " SIZE_FORMAT " Address: " PTR_FORMAT,
              compressed_class_space_size(), p2i(base));
    if (requested_addr != 0) {
      st->print(" Req Addr: " PTR_FORMAT, p2i(requested_addr));
    }
    st->cr();
  }
}

// For UseCompressedClassPointers the class space is reserved above the top of
// the Java heap.  The argument passed in is at the base of the compressed space.
void Metaspace::initialize_class_space(ReservedSpace rs) {
  // The reserved space size may be bigger because of alignment, esp with UseLargePages
  assert(rs.size() >= CompressedClassSpaceSize,
         SIZE_FORMAT " != " SIZE_FORMAT, rs.size(), CompressedClassSpaceSize);
  assert(using_class_space(), "Must be using class space");
  _class_space_list = new VirtualSpaceList(rs);
  _chunk_manager_class = new ChunkManager(true/*is_class*/);

  if (!_class_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Failed to setup compressed class space virtual space list.");
  }
}

#endif // _LP64

void Metaspace::ergo_initialize() {
  if (DumpSharedSpaces) {
    // Using large pages when dumping the shared archive is currently not implemented.
    FLAG_SET_ERGO(UseLargePagesInMetaspace, false);
  }

  size_t page_size = os::vm_page_size();
  if (UseLargePages && UseLargePagesInMetaspace) {
    page_size = os::large_page_size();
  }

  _commit_alignment  = page_size;
  _reserve_alignment = MAX2(page_size, (size_t)os::vm_allocation_granularity());

  // Do not use FLAG_SET_ERGO to update MaxMetaspaceSize, since this will
  // override if MaxMetaspaceSize was set on the command line or not.
  // This information is needed later to conform to the specification of the
  // java.lang.management.MemoryUsage API.
  //
  // Ideally, we would be able to set the default value of MaxMetaspaceSize in
  // globals.hpp to the aligned value, but this is not possible, since the
  // alignment depends on other flags being parsed.
  MaxMetaspaceSize = align_down_bounded(MaxMetaspaceSize, _reserve_alignment);

  if (MetaspaceSize > MaxMetaspaceSize) {
    MetaspaceSize = MaxMetaspaceSize;
  }

  MetaspaceSize = align_down_bounded(MetaspaceSize, _commit_alignment);

  assert(MetaspaceSize <= MaxMetaspaceSize, "MetaspaceSize should be limited by MaxMetaspaceSize");

  MinMetaspaceExpansion = align_down_bounded(MinMetaspaceExpansion, _commit_alignment);
  MaxMetaspaceExpansion = align_down_bounded(MaxMetaspaceExpansion, _commit_alignment);

  CompressedClassSpaceSize = align_down_bounded(CompressedClassSpaceSize, _reserve_alignment);

  // Initial virtual space size will be calculated at global_initialize()
  size_t min_metaspace_sz =
    VIRTUALSPACEMULTIPLIER * InitialBootClassLoaderMetaspaceSize;
  if (UseCompressedClassPointers) {
    if ((min_metaspace_sz + CompressedClassSpaceSize) > MaxMetaspaceSize) {
      if (min_metaspace_sz >= MaxMetaspaceSize) {
        vm_exit_during_initialization("MaxMetaspaceSize is too small.");
      } else {
        FLAG_SET_ERGO(CompressedClassSpaceSize,
                      MaxMetaspaceSize - min_metaspace_sz);
      }
    }
  } else if (min_metaspace_sz >= MaxMetaspaceSize) {
    FLAG_SET_ERGO(InitialBootClassLoaderMetaspaceSize,
                  min_metaspace_sz);
  }

  set_compressed_class_space_size(CompressedClassSpaceSize);
}
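
// A worked alignment example (hypothetical platform values): with a 4K page
// size and a 64K allocation granularity, _commit_alignment is 4K and
// _reserve_alignment is 64K, so e.g. -XX:MaxMetaspaceSize=100000k would be
// aligned down to 99968K, the nearest 64K multiple.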

void Metaspace::global_initialize() {
  MetaspaceGC::initialize();

#if INCLUDE_CDS
  if (DumpSharedSpaces) {
    MetaspaceShared::initialize_dumptime_shared_and_meta_spaces();
  } else if (UseSharedSpaces) {
    // If any of the archived space fails to map, UseSharedSpaces
    // is reset to false. Fall through to the
    // (!DumpSharedSpaces && !UseSharedSpaces) case to set up class
    // metaspace.
    MetaspaceShared::initialize_runtime_shared_and_meta_spaces();
  }

  if (DynamicDumpSharedSpaces && !UseSharedSpaces) {
    vm_exit_during_initialization("DynamicDumpSharedSpaces is unsupported when base CDS archive is not loaded", NULL);
  }

  if (!DumpSharedSpaces && !UseSharedSpaces)
#endif // INCLUDE_CDS
  {
#ifdef _LP64
    if (using_class_space()) {
      char* base = (char*)align_up(Universe::heap()->reserved_region().end(), _reserve_alignment);
      allocate_metaspace_compressed_klass_ptrs(base, 0);
    }
#endif // _LP64
  }

  // Initialize these before initializing the VirtualSpaceList
  _first_chunk_word_size = InitialBootClassLoaderMetaspaceSize / BytesPerWord;
  _first_chunk_word_size = align_word_size_up(_first_chunk_word_size);
  // Make the first class chunk bigger than a medium chunk so it's not put
  // on the medium chunk list.  The next chunk will be small and progress
  // from there.  This size was calculated by running -version.
  _first_class_chunk_word_size = MIN2((size_t)MediumChunk*6,
                                      (CompressedClassSpaceSize/BytesPerWord)*2);
  _first_class_chunk_word_size = align_word_size_up(_first_class_chunk_word_size);
  // Arbitrarily set the initial virtual space to a multiple
  // of the boot class loader size.
  size_t word_size = VIRTUALSPACEMULTIPLIER * _first_chunk_word_size;
  word_size = align_up(word_size, Metaspace::reserve_alignment_words());

  // Initialize the list of virtual spaces.
  _space_list = new VirtualSpaceList(word_size);
  _chunk_manager_metadata = new ChunkManager(false/*is_class*/);

  if (!_space_list->initialization_succeeded()) {
    vm_exit_during_initialization("Unable to setup metadata virtual space list.", NULL);
  }

  _tracer = new MetaspaceTracer();
}

void Metaspace::post_initialize() {
  MetaspaceGC::post_initialize();
}

void Metaspace::verify_global_initialization() {
  assert(space_list() != NULL, "Metadata VirtualSpaceList has not been initialized");
  assert(chunk_manager_metadata() != NULL, "Metadata ChunkManager has not been initialized");

  if (using_class_space()) {
    assert(class_space_list() != NULL, "Class VirtualSpaceList has not been initialized");
    assert(chunk_manager_class() != NULL, "Class ChunkManager has not been initialized");
  }
}

size_t Metaspace::align_word_size_up(size_t word_size) {
  size_t byte_size = word_size * wordSize;
  return ReservedSpace::allocation_align_size_up(byte_size) / wordSize;
}

MetaWord* Metaspace::allocate(ClassLoaderData* loader_data, size_t word_size,
                              MetaspaceObj::Type type, TRAPS) {
  assert(!_frozen, "sanity");
  assert(!(DumpSharedSpaces && THREAD->is_VM_thread()), "sanity");

  if (HAS_PENDING_EXCEPTION) {
    assert(false, "Should not allocate with exception pending");
    return NULL;  // caller does a CHECK_NULL too
  }

  assert(loader_data != NULL, "Should never pass around a NULL loader_data. "
         "ClassLoaderData::the_null_class_loader_data() should have been used.");

  MetadataType mdtype = (type == MetaspaceObj::ClassType) ? ClassType : NonClassType;

  // Try to allocate metadata.
  MetaWord* result = loader_data->metaspace_non_null()->allocate(word_size, mdtype);

  if (result == NULL) {
    tracer()->report_metaspace_allocation_failure(loader_data, word_size, type, mdtype);

    // Allocation failed.
    if (is_init_completed()) {
      // Only start a GC if the bootstrapping has completed.
      // Try to clean out some heap memory and retry. This can prevent premature
      // expansion of the metaspace.
      result = Universe::heap()->satisfy_failed_metadata_allocation(loader_data, word_size, mdtype);
    }
  }

  if (result == NULL) {
    if (DumpSharedSpaces) {
      // CDS dumping keeps loading classes, so if we hit an OOM we probably will keep hitting OOM.
      // We should abort to avoid generating a potentially bad archive.
      vm_exit_during_cds_dumping(err_msg("Failed allocating metaspace object type %s of size " SIZE_FORMAT ". CDS dump aborted.",
          MetaspaceObj::type_name(type), word_size * BytesPerWord),
        err_msg("Please increase MaxMetaspaceSize (currently " SIZE_FORMAT " bytes).", MaxMetaspaceSize));
    }
    report_metadata_oome(loader_data, word_size, type, mdtype, THREAD);
    assert(HAS_PENDING_EXCEPTION, "sanity");
    return NULL;
  }

  // Zero initialize.
  Copy::fill_to_words((HeapWord*)result, word_size, 0);

  return result;
}
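
// A minimal caller sketch (hypothetical, for illustration only; real callers
// normally go through the MetaspaceObj operator new overloads):
//
//   ClassLoaderData* cld = ...;
//   MetaWord* p = Metaspace::allocate(cld, word_size,
//                                     MetaspaceObj::SymbolType, CHECK_NULL);
//   // On success, p points to word_size zero-filled words allocated from
//   // cld's non-class metaspace.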
1330
void Metaspace::report_metadata_oome(ClassLoaderData* loader_data, size_t word_size, MetaspaceObj::Type type, MetadataType mdtype, TRAPS) {
  tracer()->report_metadata_oom(loader_data, word_size, type, mdtype);

  // Getting here means the allocation failed even after a GC; we are out of memory.
  Log(gc, metaspace, freelist, oom) log;
  if (log.is_info()) {
    log.info("Metaspace (%s) allocation failed for size " SIZE_FORMAT,
             is_class_space_allocation(mdtype) ? "class" : "data", word_size);
    ResourceMark rm;
    if (log.is_debug()) {
      if (loader_data->metaspace_or_null() != NULL) {
        LogStream ls(log.debug());
        loader_data->print_value_on(&ls);
      }
    }
    LogStream ls(log.info());
    // In case of an OOM, log out a short but still useful report.
    MetaspaceUtils::print_basic_report(&ls, 0);
  }

  bool out_of_compressed_class_space = false;
  if (is_class_space_allocation(mdtype)) {
    ClassLoaderMetaspace* metaspace = loader_data->metaspace_non_null();
    out_of_compressed_class_space =
      MetaspaceUtils::committed_bytes(Metaspace::ClassType) +
      (metaspace->class_chunk_size(word_size) * BytesPerWord) >
      CompressedClassSpaceSize;
  }

  // -XX:+HeapDumpOnOutOfMemoryError and -XX:OnOutOfMemoryError support
  const char* space_string = out_of_compressed_class_space ?
    "Compressed class space" : "Metaspace";

  report_java_out_of_memory(space_string);

  if (JvmtiExport::should_post_resource_exhausted()) {
    JvmtiExport::post_resource_exhausted(
        JVMTI_RESOURCE_EXHAUSTED_OOM_ERROR,
        space_string);
  }

  if (!is_init_completed()) {
    vm_exit_during_initialization("OutOfMemoryError", space_string);
  }

  if (out_of_compressed_class_space) {
    THROW_OOP(Universe::out_of_memory_error_class_metaspace());
  } else {
    THROW_OOP(Universe::out_of_memory_error_metaspace());
  }
}

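// The report above goes through Unified Logging. A hedged illustration of how
// to surface it (the selector matches the Log(gc, metaspace, freelist, oom)
// handle used in report_metadata_oome; the workload and metaspace sizing are
// up to the user):
//
//   java -Xlog:gc+metaspace+freelist+oom=info -XX:MaxMetaspaceSize=16m <app>
//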
const char* Metaspace::metadata_type_name(Metaspace::MetadataType mdtype) {
  switch (mdtype) {
    case Metaspace::ClassType: return "Class";
    case Metaspace::NonClassType: return "Metadata";
    default:
      assert(false, "Got bad mdtype: %d", (int) mdtype);
      return NULL;
  }
}

// Return empty virtual space nodes of the given type to the operating system
// and hand their remaining chunks back to the chunk manager.
void Metaspace::purge(MetadataType mdtype) {
  get_space_list(mdtype)->purge(get_chunk_manager(mdtype));
}

void Metaspace::purge() {
  MutexLocker cl(MetaspaceExpand_lock,
                 Mutex::_no_safepoint_check_flag);
  purge(NonClassType);
  if (using_class_space()) {
    purge(ClassType);
  }
}

// Returns true if ptr points into any metaspace region, including the
// CDS shared metaspace.
bool Metaspace::contains(const void* ptr) {
  if (MetaspaceShared::is_in_shared_metaspace(ptr)) {
    return true;
  }
  return contains_non_shared(ptr);
}

bool Metaspace::contains_non_shared(const void* ptr) {
  if (using_class_space() && get_space_list(ClassType)->contains(ptr)) {
    return true;
  }

  return get_space_list(NonClassType)->contains(ptr);
}

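// Metaspace::contains() is intended for asserts and debugging code. A minimal
// sketch of typical use (example_check is a hypothetical helper):
//
//   void example_check(const Metadata* md) {
//     assert(Metaspace::contains(md), "pointer is not metaspace-allocated");
//   }
//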
// ClassLoaderMetaspace

ClassLoaderMetaspace::ClassLoaderMetaspace(Mutex* lock, Metaspace::MetaspaceType type)
  : _space_type(type)
  , _lock(lock)
  , _vsm(NULL)
  , _class_vsm(NULL)
{
  initialize(lock, type);
}

ClassLoaderMetaspace::~ClassLoaderMetaspace() {
  Metaspace::assert_not_frozen();
  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_deaths));
  delete _vsm;
  if (Metaspace::using_class_space()) {
    delete _class_vsm;
  }
}

void ClassLoaderMetaspace::initialize_first_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  Metachunk* chunk = get_initialization_chunk(type, mdtype);
  if (chunk != NULL) {
    // Add to this manager's list of chunks in use and make it the current_chunk().
    get_space_manager(mdtype)->add_chunk(chunk, true);
  }
}

Metachunk* ClassLoaderMetaspace::get_initialization_chunk(Metaspace::MetaspaceType type, Metaspace::MetadataType mdtype) {
  size_t chunk_word_size = get_space_manager(mdtype)->get_initial_chunk_size(type);

  // Get a chunk from the chunk freelist.
  Metachunk* chunk = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(chunk_word_size);

  // If the freelist could not satisfy the request, carve a new chunk out of
  // the virtual space list.
  if (chunk == NULL) {
    chunk = Metaspace::get_space_list(mdtype)->get_new_chunk(chunk_word_size,
                                                             get_space_manager(mdtype)->medium_chunk_bunch());
  }

  return chunk;
}

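// The lookup order in get_initialization_chunk() above - recycle a chunk from
// the ChunkManager freelist first, and only then carve a fresh chunk out of
// the VirtualSpaceList - is the general pattern for sourcing chunks. Sketched
// generically (get_chunk_sketch is a hypothetical helper reusing the same calls):
//
//   Metachunk* get_chunk_sketch(Metaspace::MetadataType mdtype, size_t words, size_t bunch) {
//     Metachunk* c = Metaspace::get_chunk_manager(mdtype)->chunk_freelist_allocate(words);
//     if (c == NULL) {
//       c = Metaspace::get_space_list(mdtype)->get_new_chunk(words, bunch);
//     }
//     return c;  // May still be NULL if the reserved space is exhausted.
//   }
//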
void ClassLoaderMetaspace::initialize(Mutex* lock, Metaspace::MetaspaceType type) {
  Metaspace::verify_global_initialization();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_metaspace_births));

  // Allocate SpaceManager for metadata objects.
  _vsm = new SpaceManager(Metaspace::NonClassType, type, lock);

  if (Metaspace::using_class_space()) {
    // Allocate SpaceManager for classes.
    _class_vsm = new SpaceManager(Metaspace::ClassType, type, lock);
  }

  MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);

  // Allocate chunk for metadata objects.
  initialize_first_chunk(type, Metaspace::NonClassType);

  // Allocate chunk for class metadata objects.
  if (Metaspace::using_class_space()) {
    initialize_first_chunk(type, Metaspace::ClassType);
  }
}

MetaWord* ClassLoaderMetaspace::allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_allocs));

  // Don't use class_vsm() unless UseCompressedClassPointers is true.
  if (Metaspace::is_class_space_allocation(mdtype)) {
    return class_vsm()->allocate(word_size);
  } else {
    return vsm()->allocate(word_size);
  }
}

MetaWord* ClassLoaderMetaspace::expand_and_allocate(size_t word_size, Metaspace::MetadataType mdtype) {
  Metaspace::assert_not_frozen();
  size_t delta_bytes = MetaspaceGC::delta_capacity_until_GC(word_size * BytesPerWord);
  assert(delta_bytes > 0, "Must be");

  size_t before = 0;
  size_t after = 0;
  bool can_retry = true;
  MetaWord* res;
  bool incremented;

  // Each thread increments the HWM at most once. Even if the thread fails to increment
  // the HWM, an allocation is still attempted. This is because another thread must then
  // have incremented the HWM and therefore the allocation might still succeed.
  do {
    incremented = MetaspaceGC::inc_capacity_until_GC(delta_bytes, &after, &before, &can_retry);
    res = allocate(word_size, mdtype);
  } while (!incremented && res == NULL && can_retry);

  if (incremented) {
    Metaspace::tracer()->report_gc_threshold(before, after,
                                             MetaspaceGCThresholdUpdater::ExpandAndAllocate);
    log_trace(gc, metaspace)("Increase capacity to GC from " SIZE_FORMAT " to " SIZE_FORMAT, before, after);
  }

  return res;
}

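// An illustrative interleaving for the retry loop in expand_and_allocate()
// above, with two threads T1 and T2 racing on the high-water mark (the sizes
// are hypothetical):
//
//   T1: inc_capacity_until_GC() loses the race to T2   -> incremented == false
//   T1: allocate() fails, T2's expansion was too small -> res == NULL
//   T1: can_retry == true                              -> loop again
//   T1: inc_capacity_until_GC() succeeds               -> incremented == true
//   T1: allocate() runs under the raised HWM; the loop exits either way.
//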
size_t ClassLoaderMetaspace::allocated_blocks_bytes() const {
  return (vsm()->used_words() +
          (Metaspace::using_class_space() ? class_vsm()->used_words() : 0)) * BytesPerWord;
}

size_t ClassLoaderMetaspace::allocated_chunks_bytes() const {
  return (vsm()->capacity_words() +
          (Metaspace::using_class_space() ? class_vsm()->capacity_words() : 0)) * BytesPerWord;
}

void ClassLoaderMetaspace::deallocate(MetaWord* ptr, size_t word_size, bool is_class) {
  Metaspace::assert_not_frozen();
  assert(!SafepointSynchronize::is_at_safepoint()
         || Thread::current()->is_VM_thread(), "should be the VM thread");

  DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_external_deallocs));

  MutexLocker ml(vsm()->lock(), Mutex::_no_safepoint_check_flag);

  if (is_class && Metaspace::using_class_space()) {
    class_vsm()->deallocate(ptr, word_size);
  } else {
    vsm()->deallocate(ptr, word_size);
  }
}

size_t ClassLoaderMetaspace::class_chunk_size(size_t word_size) {
  assert(Metaspace::using_class_space(), "Has to use class space");
  return class_vsm()->calc_chunk_size(word_size);
}

void ClassLoaderMetaspace::print_on(outputStream* out) const {
  // Print both the non-class and, if used, the class space manager.
  if (Verbose) {
    vsm()->print_on(out);
    if (Metaspace::using_class_space()) {
      class_vsm()->print_on(out);
    }
  }
}

void ClassLoaderMetaspace::verify() {
  vsm()->verify();
  if (Metaspace::using_class_space()) {
    class_vsm()->verify();
  }
}

void ClassLoaderMetaspace::add_to_statistics_locked(ClassLoaderMetaspaceStatistics* out) const {
  assert_lock_strong(lock());
  vsm()->add_to_statistics_locked(&out->nonclass_sm_stats());
  if (Metaspace::using_class_space()) {
    class_vsm()->add_to_statistics_locked(&out->class_sm_stats());
  }
}

void ClassLoaderMetaspace::add_to_statistics(ClassLoaderMetaspaceStatistics* out) const {
  MutexLocker cl(lock(), Mutex::_no_safepoint_check_flag);
  add_to_statistics_locked(out);
}

1589/////////////// Unit tests ///////////////
1590
1591#ifndef PRODUCT
1592
1593class TestMetaspaceUtilsTest : AllStatic {
1594 public:
1595 static void test_reserved() {
1596 size_t reserved = MetaspaceUtils::reserved_bytes();
1597
1598 assert(reserved > 0, "assert");
1599
1600 size_t committed = MetaspaceUtils::committed_bytes();
1601 assert(committed <= reserved, "assert");
1602
1603 size_t reserved_metadata = MetaspaceUtils::reserved_bytes(Metaspace::NonClassType);
1604 assert(reserved_metadata > 0, "assert");
1605 assert(reserved_metadata <= reserved, "assert");
1606
1607 if (UseCompressedClassPointers) {
1608 size_t reserved_class = MetaspaceUtils::reserved_bytes(Metaspace::ClassType);
1609 assert(reserved_class > 0, "assert");
1610 assert(reserved_class < reserved, "assert");
1611 }
1612 }
1613
1614 static void test_committed() {
1615 size_t committed = MetaspaceUtils::committed_bytes();
1616
1617 assert(committed > 0, "assert");
1618
1619 size_t reserved = MetaspaceUtils::reserved_bytes();
1620 assert(committed <= reserved, "assert");
1621
1622 size_t committed_metadata = MetaspaceUtils::committed_bytes(Metaspace::NonClassType);
1623 assert(committed_metadata > 0, "assert");
1624 assert(committed_metadata <= committed, "assert");
1625
1626 if (UseCompressedClassPointers) {
1627 size_t committed_class = MetaspaceUtils::committed_bytes(Metaspace::ClassType);
1628 assert(committed_class > 0, "assert");
1629 assert(committed_class < committed, "assert");
1630 }
1631 }
1632
1633 static void test_virtual_space_list_large_chunk() {
1634 VirtualSpaceList* vs_list = new VirtualSpaceList(os::vm_allocation_granularity());
1635 MutexLocker cl(MetaspaceExpand_lock, Mutex::_no_safepoint_check_flag);
1636 // A size larger than VirtualSpaceSize (256k) and add one page to make it _not_ be
1637 // vm_allocation_granularity aligned on Windows.
1638 size_t large_size = (size_t)(2*256*K + (os::vm_page_size()/BytesPerWord));
1639 large_size += (os::vm_page_size()/BytesPerWord);
1640 vs_list->get_new_chunk(large_size, 0);
1641 }
1642
1643 static void test() {
1644 test_reserved();
1645 test_committed();
1646 test_virtual_space_list_large_chunk();
1647 }
1648};
1649
1650void TestMetaspaceUtils_test() {
1651 TestMetaspaceUtilsTest::test();
1652}
1653
1654#endif // !PRODUCT

struct chunkmanager_statistics_t {
  int num_specialized_chunks;
  int num_small_chunks;
  int num_medium_chunks;
  int num_humongous_chunks;
};

extern void test_metaspace_retrieve_chunkmanager_statistics(Metaspace::MetadataType mdType, chunkmanager_statistics_t* out) {
  ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(mdType);
  ChunkManagerStatistics stat;
  chunk_manager->collect_statistics(&stat);
  out->num_specialized_chunks = (int)stat.chunk_stats(SpecializedIndex).num();
  out->num_small_chunks = (int)stat.chunk_stats(SmallIndex).num();
  out->num_medium_chunks = (int)stat.chunk_stats(MediumIndex).num();
  out->num_humongous_chunks = (int)stat.chunk_stats(HumongousIndex).num();
}
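
// Hedged illustration of test-side use (assumes the test mirrors the extern
// declaration above; ASSERT_GE is the usual gtest macro):
//
//   chunkmanager_statistics_t st;
//   test_metaspace_retrieve_chunkmanager_statistics(Metaspace::NonClassType, &st);
//   ASSERT_GE(st.num_specialized_chunks, 0);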

struct chunk_geometry_t {
  size_t specialized_chunk_word_size;
  size_t small_chunk_word_size;
  size_t medium_chunk_word_size;
};

extern void test_metaspace_retrieve_chunk_geometry(Metaspace::MetadataType mdType, chunk_geometry_t* out) {
  if (mdType == Metaspace::NonClassType) {
    out->specialized_chunk_word_size = SpecializedChunk;
    out->small_chunk_word_size = SmallChunk;
    out->medium_chunk_word_size = MediumChunk;
  } else {
    out->specialized_chunk_word_size = ClassSpecializedChunk;
    out->small_chunk_word_size = ClassSmallChunk;
    out->medium_chunk_word_size = ClassMediumChunk;
  }
}

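// Companion illustration for the geometry hook (hypothetical test-side code;
// relies only on the struct filled in above):
//
//   chunk_geometry_t geo;
//   test_metaspace_retrieve_chunk_geometry(Metaspace::ClassType, &geo);
//   ASSERT_GT(geo.medium_chunk_word_size, geo.small_chunk_word_size);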