1 | /* |
2 | * Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | |
27 | #include "logging/log.hpp" |
28 | #include "logging/logStream.hpp" |
29 | #include "memory/metaspace/metachunk.hpp" |
30 | #include "memory/metaspace.hpp" |
31 | #include "memory/metaspace/chunkManager.hpp" |
32 | #include "memory/metaspace/metaDebug.hpp" |
33 | #include "memory/metaspace/metaspaceCommon.hpp" |
34 | #include "memory/metaspace/occupancyMap.hpp" |
35 | #include "memory/metaspace/virtualSpaceNode.hpp" |
36 | #include "memory/virtualspace.hpp" |
37 | #include "runtime/os.hpp" |
38 | #include "services/memTracker.hpp" |
39 | #include "utilities/copy.hpp" |
40 | #include "utilities/debug.hpp" |
41 | #include "utilities/globalDefinitions.hpp" |
42 | |
43 | namespace metaspace { |
44 | |
45 | // Decide if large pages should be committed when the memory is reserved. |
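// If the OS cannot commit large pages on demand, the only way to get them is
// to commit the whole reservation up front. Only do that if the metaspace GC
// policy would allow an expansion of this size anyway.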
46 | static bool should_commit_large_pages_when_reserving(size_t bytes) { |
47 | if (UseLargePages && UseLargePagesInMetaspace && !os::can_commit_large_page_memory()) { |
48 | size_t words = bytes / BytesPerWord; |
49 | bool is_class = false; // We never reserve large pages for the class space. |
50 | if (MetaspaceGC::can_expand(words, is_class) && |
51 | MetaspaceGC::allowed_expansion() >= words) { |
52 | return true; |
53 | } |
54 | } |
55 | |
56 | return false; |
57 | } |
58 | |
// "bytes" is the size of the associated virtual space.
60 | VirtualSpaceNode::VirtualSpaceNode(bool is_class, size_t bytes) : |
61 | _next(NULL), _is_class(is_class), _rs(), _top(NULL), _container_count(0), _occupancy_map(NULL) { |
62 | assert_is_aligned(bytes, Metaspace::reserve_alignment()); |
63 | bool large_pages = should_commit_large_pages_when_reserving(bytes); |
64 | _rs = ReservedSpace(bytes, Metaspace::reserve_alignment(), large_pages); |
65 | |
66 | if (_rs.is_reserved()) { |
    assert(_rs.base() != NULL, "Catch if we get a NULL address");
    assert(_rs.size() != 0, "Catch if we get a 0 size");
69 | assert_is_aligned(_rs.base(), Metaspace::reserve_alignment()); |
70 | assert_is_aligned(_rs.size(), Metaspace::reserve_alignment()); |
71 | |
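    // Register the mapping with NMT. Note that both class and non-class
    // metaspace mappings are tagged mtClass.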
72 | MemTracker::record_virtual_memory_type((address)_rs.base(), mtClass); |
73 | } |
74 | } |
75 | |
76 | void VirtualSpaceNode::purge(ChunkManager* chunk_manager) { |
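  // Unlink all chunks in this node from the given ChunkManager and remove
  // their sentinels; all chunks are expected to be free here, since the
  // node is about to be unmapped.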
  // When a node is purged, let's give it a thorough examination.
78 | DEBUG_ONLY(verify(true);) |
79 | Metachunk* chunk = first_chunk(); |
80 | Metachunk* invalid_chunk = (Metachunk*) top(); |
  while (chunk < invalid_chunk) {
    assert(chunk->is_tagged_free(), "Should be tagged free");
    MetaWord* next = ((MetaWord*)chunk) + chunk->word_size();
    chunk_manager->remove_chunk(chunk);
    chunk->remove_sentinel();
    assert(chunk->next() == NULL &&
           chunk->prev() == NULL,
           "Was not removed from its list");
89 | chunk = (Metachunk*) next; |
90 | } |
91 | } |
92 | |
93 | void VirtualSpaceNode::print_map(outputStream* st, bool is_class) const { |
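  // Print an ASCII representation of this node: one character per
  // specialized-chunk-sized area, stacked in four lines (chunk starts,
  // chunk type and in-use state, chunk origin, and virgin state).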
94 | |
95 | if (bottom() == top()) { |
96 | return; |
97 | } |
98 | |
99 | const size_t spec_chunk_size = is_class ? ClassSpecializedChunk : SpecializedChunk; |
100 | const size_t small_chunk_size = is_class ? ClassSmallChunk : SmallChunk; |
101 | const size_t med_chunk_size = is_class ? ClassMediumChunk : MediumChunk; |
102 | |
103 | int line_len = 100; |
104 | const size_t section_len = align_up(spec_chunk_size * line_len, med_chunk_size); |
105 | line_len = (int)(section_len / spec_chunk_size); |
106 | |
107 | static const int NUM_LINES = 4; |
108 | |
109 | char* lines[NUM_LINES]; |
110 | for (int i = 0; i < NUM_LINES; i ++) { |
111 | lines[i] = (char*)os::malloc(line_len, mtInternal); |
112 | } |
113 | int pos = 0; |
114 | const MetaWord* p = bottom(); |
115 | const Metachunk* chunk = (const Metachunk*)p; |
116 | const MetaWord* chunk_end = p + chunk->word_size(); |
117 | while (p < top()) { |
118 | if (pos == line_len) { |
119 | pos = 0; |
120 | for (int i = 0; i < NUM_LINES; i ++) { |
121 | st->fill_to(22); |
122 | st->print_raw(lines[i], line_len); |
123 | st->cr(); |
124 | } |
125 | } |
126 | if (pos == 0) { |
      st->print(PTR_FORMAT ":", p2i(p));
128 | } |
129 | if (p == chunk_end) { |
130 | chunk = (Metachunk*)p; |
131 | chunk_end = p + chunk->word_size(); |
132 | } |
    // Line 1: chunk starting points (a dot if that area is a chunk start).
134 | lines[0][pos] = p == (const MetaWord*)chunk ? '.' : ' '; |
135 | |
136 | // Line 2: chunk type (x=spec, s=small, m=medium, h=humongous), uppercase if |
137 | // chunk is in use. |
138 | const bool chunk_is_free = ((Metachunk*)chunk)->is_tagged_free(); |
139 | if (chunk->word_size() == spec_chunk_size) { |
140 | lines[1][pos] = chunk_is_free ? 'x' : 'X'; |
141 | } else if (chunk->word_size() == small_chunk_size) { |
142 | lines[1][pos] = chunk_is_free ? 's' : 'S'; |
143 | } else if (chunk->word_size() == med_chunk_size) { |
144 | lines[1][pos] = chunk_is_free ? 'm' : 'M'; |
145 | } else if (chunk->word_size() > med_chunk_size) { |
146 | lines[1][pos] = chunk_is_free ? 'h' : 'H'; |
147 | } else { |
148 | ShouldNotReachHere(); |
149 | } |
150 | |
151 | // Line 3: chunk origin |
152 | const ChunkOrigin origin = chunk->get_origin(); |
153 | lines[2][pos] = origin == origin_normal ? ' ' : '0' + (int) origin; |
154 | |
    // Line 4: Virgin chunk? Virgin chunks are chunks created as a byproduct of padding or splitting,
    // but never used.
157 | lines[3][pos] = chunk->get_use_count() > 0 ? ' ' : 'v'; |
158 | |
159 | p += spec_chunk_size; |
160 | pos ++; |
161 | } |
162 | if (pos > 0) { |
163 | for (int i = 0; i < NUM_LINES; i ++) { |
164 | st->fill_to(22); |
165 | st->print_raw(lines[i], line_len); |
166 | st->cr(); |
167 | } |
168 | } |
169 | for (int i = 0; i < NUM_LINES; i ++) { |
170 | os::free(lines[i]); |
171 | } |
172 | } |
173 | |
174 | |
175 | #ifdef ASSERT |
176 | |
177 | // Verify counters, all chunks in this list node and the occupancy map. |
178 | void VirtualSpaceNode::verify(bool slow) { |
  log_trace(gc, metaspace, freelist)("verifying %s virtual space node (%s).",
                                     (is_class() ? "class space" : "metaspace"), (slow ? "slow" : "quick"));
181 | // Fast mode: just verify chunk counters and basic geometry |
182 | // Slow mode: verify chunks and occupancy map |
183 | uintx num_in_use_chunks = 0; |
184 | Metachunk* chunk = first_chunk(); |
185 | Metachunk* invalid_chunk = (Metachunk*) top(); |
186 | |
187 | // Iterate the chunks in this node and verify each chunk. |
  while (chunk < invalid_chunk) {
189 | if (slow) { |
190 | do_verify_chunk(chunk); |
191 | } |
192 | if (!chunk->is_tagged_free()) { |
193 | num_in_use_chunks ++; |
194 | } |
195 | const size_t s = chunk->word_size(); |
196 | // Prevent endless loop on invalid chunk size. |
    assert(is_valid_chunksize(is_class(), s), "Invalid chunk size: " SIZE_FORMAT ".", s);
198 | MetaWord* next = ((MetaWord*)chunk) + s; |
199 | chunk = (Metachunk*) next; |
200 | } |
  assert(_container_count == num_in_use_chunks, "Container count mismatch (real: " UINTX_FORMAT
         ", counter: " UINTX_FORMAT ").", num_in_use_chunks, _container_count);
203 | // Also verify the occupancy map. |
204 | if (slow) { |
205 | occupancy_map()->verify(bottom(), top()); |
206 | } |
207 | } |
208 | |
209 | // Verify that all free chunks in this node are ideally merged |
// (there should not be multiple small chunks where a large chunk could exist.)
211 | void VirtualSpaceNode::verify_free_chunks_are_ideally_merged() { |
212 | Metachunk* chunk = first_chunk(); |
213 | Metachunk* invalid_chunk = (Metachunk*) top(); |
214 | // Shorthands. |
215 | const size_t size_med = (is_class() ? ClassMediumChunk : MediumChunk) * BytesPerWord; |
216 | const size_t size_small = (is_class() ? ClassSmallChunk : SmallChunk) * BytesPerWord; |
217 | int num_free_chunks_since_last_med_boundary = -1; |
218 | int num_free_chunks_since_last_small_boundary = -1; |
219 | bool error = false; |
220 | char err[256]; |
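  // Counter semantics: -1 means no boundary of the respective alignment has
  // been passed yet; otherwise the counter holds the number of free chunks
  // seen since the last small- resp. medium-aligned boundary. Reaching a
  // boundary at a free chunk while the counter is positive indicates that
  // the preceding free chunks should have been merged into a larger chunk.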
  while (!error && chunk < invalid_chunk) {
222 | // Test for missed chunk merge opportunities: count number of free chunks since last chunk boundary. |
223 | // Reset the counter when encountering a non-free chunk. |
224 | if (chunk->get_chunk_type() != HumongousIndex) { |
225 | if (chunk->is_tagged_free()) { |
226 | // Count successive free, non-humongous chunks. |
227 | if (is_aligned(chunk, size_small)) { |
228 | if (num_free_chunks_since_last_small_boundary > 0) { |
229 | error = true; |
            jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a small chunk preceding " PTR_FORMAT ".", p2i(chunk));
231 | } else { |
232 | num_free_chunks_since_last_small_boundary = 0; |
233 | } |
234 | } else if (num_free_chunks_since_last_small_boundary != -1) { |
235 | num_free_chunks_since_last_small_boundary ++; |
236 | } |
237 | if (is_aligned(chunk, size_med)) { |
238 | if (num_free_chunks_since_last_med_boundary > 0) { |
239 | error = true; |
            jio_snprintf(err, sizeof(err), "Missed chunk merge opportunity to merge a medium chunk preceding " PTR_FORMAT ".", p2i(chunk));
241 | } else { |
242 | num_free_chunks_since_last_med_boundary = 0; |
243 | } |
244 | } else if (num_free_chunks_since_last_med_boundary != -1) { |
245 | num_free_chunks_since_last_med_boundary ++; |
246 | } |
247 | } else { |
248 | // Encountering a non-free chunk, reset counters. |
249 | num_free_chunks_since_last_med_boundary = -1; |
250 | num_free_chunks_since_last_small_boundary = -1; |
251 | } |
252 | } else { |
253 | // One cannot merge areas with a humongous chunk in the middle. Reset counters. |
254 | num_free_chunks_since_last_med_boundary = -1; |
255 | num_free_chunks_since_last_small_boundary = -1; |
256 | } |
257 | |
258 | if (error) { |
259 | print_map(tty, is_class()); |
      fatal("%s", err);
261 | } |
262 | |
263 | MetaWord* next = ((MetaWord*)chunk) + chunk->word_size(); |
264 | chunk = (Metachunk*) next; |
265 | } |
266 | } |
267 | #endif // ASSERT |
268 | |
269 | void VirtualSpaceNode::inc_container_count() { |
270 | assert_lock_strong(MetaspaceExpand_lock); |
271 | _container_count++; |
272 | } |
273 | |
274 | void VirtualSpaceNode::dec_container_count() { |
275 | assert_lock_strong(MetaspaceExpand_lock); |
276 | _container_count--; |
277 | } |
278 | |
279 | VirtualSpaceNode::~VirtualSpaceNode() { |
280 | _rs.release(); |
281 | if (_occupancy_map != NULL) { |
282 | delete _occupancy_map; |
283 | } |
284 | #ifdef ASSERT |
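  // Zap this object to help catch stray accesses to a deleted node.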
285 | size_t word_size = sizeof(*this) / BytesPerWord; |
286 | Copy::fill_to_words((HeapWord*) this, word_size, 0xf1f1f1f1); |
287 | #endif |
288 | } |
289 | |
290 | size_t VirtualSpaceNode::used_words_in_vs() const { |
291 | return pointer_delta(top(), bottom(), sizeof(MetaWord)); |
292 | } |
293 | |
294 | // Space committed in the VirtualSpace |
295 | size_t VirtualSpaceNode::capacity_words_in_vs() const { |
296 | return pointer_delta(end(), bottom(), sizeof(MetaWord)); |
297 | } |
298 | |
299 | size_t VirtualSpaceNode::free_words_in_vs() const { |
300 | return pointer_delta(end(), top(), sizeof(MetaWord)); |
301 | } |
302 | |
303 | // Given an address larger than top(), allocate padding chunks until top is at the given address. |
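// Padding chunks are only ever specialized- or small-sized, since top() is
// always at least specialized-chunk-aligned.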
304 | void VirtualSpaceNode::allocate_padding_chunks_until_top_is_at(MetaWord* target_top) { |
305 | |
  assert(target_top > top(), "Sanity");
307 | |
308 | // Padding chunks are added to the freelist. |
309 | ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class()); |
310 | |
311 | // shorthands |
312 | const size_t spec_word_size = chunk_manager->specialized_chunk_word_size(); |
313 | const size_t small_word_size = chunk_manager->small_chunk_word_size(); |
314 | const size_t med_word_size = chunk_manager->medium_chunk_word_size(); |
315 | |
316 | while (top() < target_top) { |
317 | |
    // We could make this code more generic, but right now we only deal with two possible chunk sizes
319 | // for padding chunks, so it is not worth it. |
320 | size_t padding_chunk_word_size = small_word_size; |
    if (!is_aligned(top(), small_word_size * sizeof(MetaWord))) {
322 | assert_is_aligned(top(), spec_word_size * sizeof(MetaWord)); // Should always hold true. |
323 | padding_chunk_word_size = spec_word_size; |
324 | } |
325 | MetaWord* here = top(); |
326 | assert_is_aligned(here, padding_chunk_word_size * sizeof(MetaWord)); |
327 | inc_top(padding_chunk_word_size); |
328 | |
329 | // Create new padding chunk. |
330 | ChunkIndex padding_chunk_type = get_chunk_type_by_size(padding_chunk_word_size, is_class()); |
    assert(padding_chunk_type == SpecializedIndex || padding_chunk_type == SmallIndex, "sanity");
332 | |
333 | Metachunk* const padding_chunk = |
334 | ::new (here) Metachunk(padding_chunk_type, is_class(), padding_chunk_word_size, this); |
    assert(padding_chunk == (Metachunk*)here, "Sanity");
336 | DEBUG_ONLY(padding_chunk->set_origin(origin_pad);) |
    log_trace(gc, metaspace, freelist)("Created padding chunk in %s at "
                                       PTR_FORMAT ", size " SIZE_FORMAT_HEX ".",
                                       (is_class() ? "class space " : "metaspace"),
                                       p2i(padding_chunk), padding_chunk->word_size() * sizeof(MetaWord));
341 | |
342 | // Mark chunk start in occupancy map. |
343 | occupancy_map()->set_chunk_starts_at_address((MetaWord*)padding_chunk, true); |
344 | |
    // Chunks are born as in-use (see Metachunk ctor). So, before returning
346 | // the padding chunk to its chunk manager, mark it as in use (ChunkManager |
347 | // will assert that). |
348 | do_update_in_use_info_for_chunk(padding_chunk, true); |
349 | |
350 | // Return Chunk to freelist. |
351 | inc_container_count(); |
352 | chunk_manager->return_single_chunk(padding_chunk); |
353 | // Please note: at this point, ChunkManager::return_single_chunk() |
354 | // may already have merged the padding chunk with neighboring chunks, so |
355 | // it may have vanished at this point. Do not reference the padding |
356 | // chunk beyond this point. |
357 | } |
358 | |
  assert(top() == target_top, "Sanity");
360 | |
361 | } // allocate_padding_chunks_until_top_is_at() |
362 | |
363 | // Allocates the chunk from the virtual space only. |
364 | // This interface is also used internally for debugging. Not all |
365 | // chunks removed here are necessarily used for allocation. |
366 | Metachunk* VirtualSpaceNode::take_from_committed(size_t chunk_word_size) { |
367 | // Non-humongous chunks are to be allocated aligned to their chunk |
368 | // size. So, start addresses of medium chunks are aligned to medium |
369 | // chunk size, those of small chunks to small chunk size and so |
370 | // forth. This facilitates merging of free chunks and reduces |
371 | // fragmentation. Chunk sizes are spec < small < medium, with each |
372 | // larger chunk size being a multiple of the next smaller chunk |
373 | // size. |
  // Because of this alignment, we may need to create a number of padding
375 | // chunks. These chunks are created and added to the freelist. |
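  //
  // A hypothetical example: with spec/small/medium chunk sizes of 1/4/32 KB,
  // a request for a small (4 KB) chunk while top() is at offset 5 KB first
  // requires padding top() up to the 8 KB boundary; the 5..8 KB gap is filled
  // with three spec-sized padding chunks, which are added to the freelist.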
376 | |
377 | // The chunk manager to which we will give our padding chunks. |
378 | ChunkManager* const chunk_manager = Metaspace::get_chunk_manager(is_class()); |
379 | |
380 | // shorthands |
381 | const size_t spec_word_size = chunk_manager->specialized_chunk_word_size(); |
382 | const size_t small_word_size = chunk_manager->small_chunk_word_size(); |
383 | const size_t med_word_size = chunk_manager->medium_chunk_word_size(); |
384 | |
  assert(chunk_word_size == spec_word_size || chunk_word_size == small_word_size ||
         chunk_word_size >= med_word_size, "Invalid chunk size requested.");
387 | |
388 | // Chunk alignment (in bytes) == chunk size unless humongous. |
389 | // Humongous chunks are aligned to the smallest chunk size (spec). |
390 | const size_t required_chunk_alignment = (chunk_word_size > med_word_size ? |
391 | spec_word_size : chunk_word_size) * sizeof(MetaWord); |
392 | |
393 | // Do we have enough space to create the requested chunk plus |
394 | // any padding chunks needed? |
395 | MetaWord* const next_aligned = |
396 | static_cast<MetaWord*>(align_up(top(), required_chunk_alignment)); |
397 | if (!is_available((next_aligned - top()) + chunk_word_size)) { |
398 | return NULL; |
399 | } |
400 | |
401 | // Before allocating the requested chunk, allocate padding chunks if necessary. |
402 | // We only need to do this for small or medium chunks: specialized chunks are the |
  // smallest size, hence always aligned. Humongous chunks are allocated unaligned
404 | // (implicitly, also aligned to smallest chunk size). |
405 | if ((chunk_word_size == med_word_size || chunk_word_size == small_word_size) && next_aligned > top()) { |
    log_trace(gc, metaspace, freelist)("Creating padding chunks in %s between %p and %p...",
                                       (is_class() ? "class space " : "metaspace"),
                                       top(), next_aligned);
409 | allocate_padding_chunks_until_top_is_at(next_aligned); |
410 | // Now, top should be aligned correctly. |
411 | assert_is_aligned(top(), required_chunk_alignment); |
412 | } |
413 | |
414 | // Now, top should be aligned correctly. |
415 | assert_is_aligned(top(), required_chunk_alignment); |
416 | |
417 | // Bottom of the new chunk |
418 | MetaWord* chunk_limit = top(); |
  assert(chunk_limit != NULL, "Not safe to call this method");
420 | |
421 | // The virtual spaces are always expanded by the |
422 | // commit granularity to enforce the following condition. |
423 | // Without this the is_available check will not work correctly. |
  assert(_virtual_space.committed_size() == _virtual_space.actual_committed_size(),
         "The committed memory doesn't match the expanded memory.");
426 | |
427 | if (!is_available(chunk_word_size)) { |
428 | LogTarget(Trace, gc, metaspace, freelist) lt; |
429 | if (lt.is_enabled()) { |
430 | LogStream ls(lt); |
      ls.print("VirtualSpaceNode::take_from_committed() not available " SIZE_FORMAT " words ", chunk_word_size);
432 | // Dump some information about the virtual space that is nearly full |
433 | print_on(&ls); |
434 | } |
435 | return NULL; |
436 | } |
437 | |
438 | // Take the space (bump top on the current virtual space). |
439 | inc_top(chunk_word_size); |
440 | |
441 | // Initialize the chunk |
442 | ChunkIndex chunk_type = get_chunk_type_by_size(chunk_word_size, is_class()); |
443 | Metachunk* result = ::new (chunk_limit) Metachunk(chunk_type, is_class(), chunk_word_size, this); |
  assert(result == (Metachunk*)chunk_limit, "Sanity");
445 | occupancy_map()->set_chunk_starts_at_address((MetaWord*)result, true); |
446 | do_update_in_use_info_for_chunk(result, true); |
447 | |
448 | inc_container_count(); |
449 | |
450 | #ifdef ASSERT |
451 | EVERY_NTH(VerifyMetaspaceInterval) |
452 | chunk_manager->locked_verify(true); |
453 | verify(true); |
454 | END_EVERY_NTH |
455 | do_verify_chunk(result); |
456 | #endif |
457 | |
458 | result->inc_use_count(); |
459 | |
460 | return result; |
461 | } |
462 | |
463 | |
464 | // Expand the virtual space (commit more of the reserved space) |
465 | bool VirtualSpaceNode::expand_by(size_t min_words, size_t preferred_words) { |
466 | size_t min_bytes = min_words * BytesPerWord; |
467 | size_t preferred_bytes = preferred_words * BytesPerWord; |
468 | |
469 | size_t uncommitted = virtual_space()->reserved_size() - virtual_space()->actual_committed_size(); |
470 | |
471 | if (uncommitted < min_bytes) { |
472 | return false; |
473 | } |
474 | |
475 | size_t commit = MIN2(preferred_bytes, uncommitted); |
476 | bool result = virtual_space()->expand_by(commit, false); |
477 | |
478 | if (result) { |
    log_trace(gc, metaspace, freelist)("Expanded %s virtual space list node by " SIZE_FORMAT " words.",
                                       (is_class() ? "class" : "non-class"), commit);
    DEBUG_ONLY(Atomic::inc(&g_internal_statistics.num_committed_space_expanded));
  } else {
    log_trace(gc, metaspace, freelist)("Failed to expand %s virtual space list node by " SIZE_FORMAT " words.",
                                       (is_class() ? "class" : "non-class"), commit);
485 | } |
486 | |
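  // A failed commit is fatal in debug builds; in product builds the failure
  // is reported to the caller.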
  assert(result, "Failed to commit memory");
488 | |
489 | return result; |
490 | } |
491 | |
492 | Metachunk* VirtualSpaceNode::get_chunk_vs(size_t chunk_word_size) { |
493 | assert_lock_strong(MetaspaceExpand_lock); |
494 | Metachunk* result = take_from_committed(chunk_word_size); |
495 | return result; |
496 | } |
497 | |
498 | bool VirtualSpaceNode::initialize() { |
499 | |
500 | if (!_rs.is_reserved()) { |
501 | return false; |
502 | } |
503 | |
  // These are necessary restrictions to make sure that the virtual space always
  // grows in steps of Metaspace::commit_alignment(). If both base and size are
  // aligned, only the middle alignment of the VirtualSpace is used.
507 | assert_is_aligned(_rs.base(), Metaspace::commit_alignment()); |
508 | assert_is_aligned(_rs.size(), Metaspace::commit_alignment()); |
509 | |
510 | // ReservedSpaces marked as special will have the entire memory |
511 | // pre-committed. Setting a committed size will make sure that |
  // committed_size and actual_committed_size agree.
513 | size_t pre_committed_size = _rs.special() ? _rs.size() : 0; |
514 | |
515 | bool result = virtual_space()->initialize_with_granularity(_rs, pre_committed_size, |
516 | Metaspace::commit_alignment()); |
517 | if (result) { |
    assert(virtual_space()->committed_size() == virtual_space()->actual_committed_size(),
           "Checking that the pre-committed memory was registered by the VirtualSpace");
520 | |
521 | set_top((MetaWord*)virtual_space()->low()); |
522 | } |
523 | |
524 | // Initialize Occupancy Map. |
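  // The map tracks chunk starts and in-use state at the granularity of the
  // smallest possible chunk size.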
525 | const size_t smallest_chunk_size = is_class() ? ClassSpecializedChunk : SpecializedChunk; |
526 | _occupancy_map = new OccupancyMap(bottom(), reserved_words(), smallest_chunk_size); |
527 | |
528 | return result; |
529 | } |
530 | |
531 | void VirtualSpaceNode::print_on(outputStream* st, size_t scale) const { |
532 | size_t used_words = used_words_in_vs(); |
533 | size_t commit_words = committed_words(); |
534 | size_t res_words = reserved_words(); |
535 | VirtualSpace* vs = virtual_space(); |
536 | |
  st->print("node @" PTR_FORMAT ": ", p2i(this));
  st->print("reserved=");
  print_scaled_words(st, res_words, scale);
  st->print(", committed=");
  print_scaled_words_and_percentage(st, commit_words, res_words, scale);
  st->print(", used=");
  print_scaled_words_and_percentage(st, used_words, res_words, scale);
544 | st->cr(); |
545 | st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " |
            PTR_FORMAT ", " PTR_FORMAT ")",
547 | p2i(bottom()), p2i(top()), p2i(end()), |
548 | p2i(vs->high_boundary())); |
549 | } |
550 | |
551 | #ifdef ASSERT |
552 | void VirtualSpaceNode::mangle() { |
553 | size_t word_size = capacity_words_in_vs(); |
554 | Copy::fill_to_words((HeapWord*) low(), word_size, 0xf1f1f1f1); |
555 | } |
556 | #endif // ASSERT |
557 | |
558 | void VirtualSpaceNode::retire(ChunkManager* chunk_manager) { |
  assert(is_class() == chunk_manager->is_class(), "Wrong ChunkManager?");
560 | #ifdef ASSERT |
561 | verify(false); |
562 | EVERY_NTH(VerifyMetaspaceInterval) |
563 | verify(true); |
564 | END_EVERY_NTH |
565 | #endif |
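  // Carve up the remaining committed space into free chunks, trying the
  // largest chunk size first, and return them to the ChunkManager.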
566 | for (int i = (int)MediumIndex; i >= (int)ZeroIndex; --i) { |
567 | ChunkIndex index = (ChunkIndex)i; |
568 | size_t chunk_size = chunk_manager->size_by_index(index); |
569 | |
570 | while (free_words_in_vs() >= chunk_size) { |
571 | Metachunk* chunk = get_chunk_vs(chunk_size); |
      // The chunk will be allocated aligned, so the allocation may require
      // additional padding chunks. That may cause the above allocation to
      // fail. Just ignore the failed allocation and continue with the
      // next smaller chunk size. As the VirtualSpaceNode committed
      // size should be a multiple of the smallest chunk size, we
      // should always be able to fill the VirtualSpace completely.
578 | if (chunk == NULL) { |
579 | break; |
580 | } |
581 | chunk_manager->return_single_chunk(chunk); |
582 | } |
583 | } |
  assert(free_words_in_vs() == 0, "should be empty now");
585 | } |
586 | |
587 | } // namespace metaspace |
588 | |
589 | |