/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/parallel/asPSYoungGen.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psScavenge.inline.hpp"
#include "gc/parallel/psYoungGen.hpp"
#include "gc/shared/gcUtil.hpp"
#include "gc/shared/genArguments.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/java.hpp"
#include "utilities/align.hpp"

ASPSYoungGen::ASPSYoungGen(size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {
}


ASPSYoungGen::ASPSYoungGen(PSVirtualSpace* vs,
                           size_t init_byte_size,
                           size_t minimum_byte_size,
                           size_t byte_size_limit) :
  //PSYoungGen(init_byte_size, minimum_byte_size, byte_size_limit),
  PSYoungGen(vs->committed_size(), minimum_byte_size, byte_size_limit),
  _gen_size_limit(byte_size_limit) {

  assert(vs->committed_size() == init_byte_size, "Cannot replace with");

  _virtual_space = vs;
}

void ASPSYoungGen::initialize_virtual_space(ReservedSpace rs,
                                            size_t alignment) {
  assert(_init_gen_size != 0, "Should have a finite size");
  _virtual_space = new PSVirtualSpaceHighToLow(rs, alignment);
  if (!_virtual_space->expand_by(_init_gen_size)) {
    vm_exit_during_initialization("Could not reserve enough space for "
                                  "object heap");
  }
}

void ASPSYoungGen::initialize(ReservedSpace rs, size_t alignment) {
  initialize_virtual_space(rs, alignment);
  initialize_work();
}

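// Return the number of bytes that can still be committed for this
// generation: the portion of the generation size limit that is not yet
// committed, aligned down to the generation alignment.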
size_t ASPSYoungGen::available_for_expansion() {
  size_t current_committed_size = virtual_space()->committed_size();
  assert((gen_size_limit() >= current_committed_size),
         "generation size limit is wrong");

  size_t result = gen_size_limit() - current_committed_size;
  size_t result_aligned = align_down(result, GenAlignment);
  return result_aligned;
}

// Return the number of bytes the young gen is willing to give up.
//
// Future implementations could check the survivors and if to_space is in the
// right place (below from_space), take a chunk from to_space.
size_t ASPSYoungGen::available_for_contraction() {
  size_t uncommitted_bytes = virtual_space()->uncommitted_size();
  if (uncommitted_bytes != 0) {
    return uncommitted_bytes;
  }

  if (eden_space()->is_empty()) {
    // Respect the minimum size for eden and for the young gen as a whole.
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    const size_t eden_alignment = SpaceAlignment;

    assert(eden_space()->capacity_in_bytes() >= eden_alignment,
           "Alignment is wrong");
    size_t eden_avail = eden_space()->capacity_in_bytes() - eden_alignment;
    eden_avail = align_down(eden_avail, GenAlignment);

    assert(virtual_space()->committed_size() >= min_gen_size(),
           "minimum gen size is wrong");
    size_t gen_avail = virtual_space()->committed_size() - min_gen_size();
    assert(virtual_space()->is_aligned(gen_avail), "not aligned");

    const size_t max_contraction = MIN2(eden_avail, gen_avail);
    // See comment for ASPSOldGen::available_for_contraction()
    // for reasons the "increment" fraction is used.
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    size_t result = policy->eden_increment_aligned_down(max_contraction);
    size_t result_aligned = align_down(result, GenAlignment);

    log_trace(gc, ergo)("ASPSYoungGen::available_for_contraction: " SIZE_FORMAT " K", result_aligned/K);
    log_trace(gc, ergo)(" max_contraction " SIZE_FORMAT " K", max_contraction/K);
    log_trace(gc, ergo)(" eden_avail " SIZE_FORMAT " K", eden_avail/K);
    log_trace(gc, ergo)(" gen_avail " SIZE_FORMAT " K", gen_avail/K);

    return result_aligned;
  }

  return 0;
}

// The current implementation only considers space up to the end of eden.
// If to_space is below from_space, to_space is not considered, although
// it could be.
size_t ASPSYoungGen::available_to_live() {
  const size_t alignment = SpaceAlignment;

  // Include any space that is committed but is not in eden.
  size_t available = pointer_delta(eden_space()->bottom(),
                                   virtual_space()->low(),
                                   sizeof(char));

  const size_t eden_capacity = eden_space()->capacity_in_bytes();
  if (eden_space()->is_empty() && eden_capacity > alignment) {
    available += eden_capacity - alignment;
  }
  return available;
}

// Similar to PSYoungGen::resize_generation() but
//  allows sum of eden_size and 2 * survivor_size to exceed _max_gen_size
//  expands at the low end of the virtual space
//  moves the boundary between the generations in order to expand
//  some additional diagnostics
// If no additional changes are required, this can be deleted
// and the changes factored back into PSYoungGen::resize_generation().
bool ASPSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) {
  const size_t alignment = virtual_space()->alignment();
  size_t orig_size = virtual_space()->committed_size();
  bool size_changed = false;

  // There used to be a guarantee here that
  //   (eden_size + 2*survivor_size) <= _max_gen_size
  // This requirement is enforced by the calculation of desired_size
  // below. It may not be true on entry since eden_size is not bounded
  // by the generation size.

  assert(max_size() == reserved().byte_size(), "max gen size problem?");
  assert(min_gen_size() <= orig_size && orig_size <= max_size(),
         "just checking");

  // Adjust new generation size
  const size_t eden_plus_survivors =
    align_up(eden_size + 2 * survivor_size, alignment);
  size_t desired_size = MAX2(MIN2(eden_plus_survivors, gen_size_limit()),
                             min_gen_size());
  assert(desired_size <= gen_size_limit(), "just checking");

  if (desired_size > orig_size) {
    // Grow the generation
    size_t change = desired_size - orig_size;
    HeapWord* prev_low = (HeapWord*) virtual_space()->low();
    if (!virtual_space()->expand_by(change)) {
      return false;
    }
    if (ZapUnusedHeapArea) {
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
      HeapWord* new_low = (HeapWord*) virtual_space()->low();
      assert(new_low < prev_low, "Did not grow");

      MemRegion mangle_region(new_low, prev_low);
      SpaceMangler::mangle_region(mangle_region);
    }
    size_changed = true;
  } else if (desired_size < orig_size) {
    size_t desired_change = orig_size - desired_size;

    // How much is available for shrinking.
    size_t available_bytes = limit_gen_shrink(desired_change);
    size_t change = MIN2(desired_change, available_bytes);
    virtual_space()->shrink_by(change);
    size_changed = true;
  } else {
    if (orig_size == gen_size_limit()) {
      log_trace(gc)("ASPSYoung generation size at maximum: " SIZE_FORMAT "K", orig_size/K);
    } else if (orig_size == min_gen_size()) {
204 | log_trace(gc)("ASPSYoung generation size at minium: " SIZE_FORMAT "K" , orig_size/K); |
    }
  }

  if (size_changed) {
    reset_after_change();
    log_trace(gc)("ASPSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
                  orig_size/K, virtual_space()->committed_size()/K);
  }

  guarantee(eden_plus_survivors <= virtual_space()->committed_size() ||
            virtual_space()->committed_size() == max_size(), "Sanity");

  return true;
}

// Similar to PSYoungGen::resize_spaces() but
//  eden always starts at the low end of the committed virtual space
//  current implementation does not allow holes between the spaces
//  _young_generation_boundary has to be reset because it changes, so
//  additional verification is needed

void ASPSYoungGen::resize_spaces(size_t requested_eden_size,
                                 size_t requested_survivor_size) {
  assert(UseAdaptiveSizePolicy, "sanity check");
  assert(requested_eden_size > 0 && requested_survivor_size > 0,
         "just checking");

  space_invariants();

  // We require eden and to space to be empty
  if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) {
    return;
  }

  log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: "
                      SIZE_FORMAT
                      ", requested_survivor_size: " SIZE_FORMAT ")",
                      requested_eden_size, requested_survivor_size);
  log_trace(gc, ergo)(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") "
                      SIZE_FORMAT,
                      p2i(eden_space()->bottom()),
                      p2i(eden_space()->end()),
                      pointer_delta(eden_space()->end(), eden_space()->bottom(), sizeof(char)));
  log_trace(gc, ergo)(" from: [" PTR_FORMAT ".." PTR_FORMAT ") "
                      SIZE_FORMAT,
                      p2i(from_space()->bottom()),
                      p2i(from_space()->end()),
                      pointer_delta(from_space()->end(), from_space()->bottom(), sizeof(char)));
  log_trace(gc, ergo)(" to: [" PTR_FORMAT ".." PTR_FORMAT ") "
                      SIZE_FORMAT,
                      p2i(to_space()->bottom()),
                      p2i(to_space()->end()),
                      pointer_delta( to_space()->end(), to_space()->bottom(), sizeof(char)));

  // There's nothing to do if the new sizes are the same as the current
  if (requested_survivor_size == to_space()->capacity_in_bytes() &&
      requested_survivor_size == from_space()->capacity_in_bytes() &&
      requested_eden_size == eden_space()->capacity_in_bytes()) {
    log_trace(gc, ergo)(" capacities are the right sizes, returning");
    return;
  }

  char* eden_start = (char*)virtual_space()->low();
  char* eden_end = (char*)eden_space()->end();
  char* from_start = (char*)from_space()->bottom();
  char* from_end = (char*)from_space()->end();
  char* to_start = (char*)to_space()->bottom();
  char* to_end = (char*)to_space()->end();

  assert(eden_start < from_start, "Cannot push into from_space");

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  const bool maintain_minimum =
    (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size();

  bool eden_from_to_order = from_start < to_start;
  // Check whether from space is below to space
  if (eden_from_to_order) {
    // Eden, from, to

    log_trace(gc, ergo)(" Eden, from, to:");

    // Set eden
    // "requested_eden_size" is a goal for the size of eden
    // and may not be attainable. "eden_size" below is
    // calculated based on the location of from-space and
    // the goal for the size of eden. from-space is
    // fixed in place because it contains live data.
    // The calculation is done this way to avoid 32-bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large for representation in 32 bits).
    size_t eden_size;
    if (maintain_minimum) {
      // Only make eden larger than the requested size if
      // the minimum size of the generation has to be maintained.
      // This could be done in general but policy at a higher
      // level is determining a requested size for eden and that
      // should be honored unless there is a fundamental reason.
      eden_size = pointer_delta(from_start,
                                eden_start,
                                sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(from_start, eden_start, sizeof(char)));
    }

    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // To may resize into from space as long as it is clear of live data.
    // From space must remain page aligned, though, so we need to do some
    // extra calculations.

    // First calculate an optimal to-space
    to_end = (char*)virtual_space()->high();
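    // to_start is computed as to_end minus the requested survivor size;
    // the size is cast to a pointer so that pointer_delta() can express
    // the subtraction.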
    to_start = (char*)pointer_delta(to_end,
                                    (char*)requested_survivor_size,
                                    sizeof(char));

    // Does the optimal to-space overlap from-space?
    if (to_start < (char*)from_space()->end()) {
      // Calculate the minimum offset possible for from_end
      size_t from_size =
        pointer_delta(from_space()->top(), from_start, sizeof(char));

      // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME!
      if (from_size == 0) {
        from_size = SpaceAlignment;
      } else {
        from_size = align_up(from_size, SpaceAlignment);
      }

      from_end = from_start + from_size;
      assert(from_end > from_start, "addition overflow or from_size problem");

      guarantee(from_end <= (char*)from_space()->end(),
                "from_end moved to the right");

      // Now update to_start with the new from_end
      to_start = MAX2(from_end, to_start);
    }

    guarantee(to_start != to_end, "to space is zero sized");

    log_trace(gc, ergo)(" [eden_start .. eden_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)(" [from_start .. from_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
    log_trace(gc, ergo)(" [ to_start .. to_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta( to_end, to_start, sizeof(char)));
  } else {
    // Eden, to, from
    log_trace(gc, ergo)(" Eden, to, from:");

    // To space gets priority over eden resizing. Note that we position
    // to space as if we were able to resize from space, even though from
    // space is not modified.
    // Giving eden priority was tried and gave poorer performance.
    to_end = (char*)pointer_delta(virtual_space()->high(),
                                  (char*)requested_survivor_size,
                                  sizeof(char));
    to_end = MIN2(to_end, from_start);
    to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size,
                                    sizeof(char));
    // if the space sizes are to be increased by several times then
    // 'to_start' will point beyond the young generation. In this case
    // 'to_start' should be adjusted.
    to_start = MAX2(to_start, eden_start + SpaceAlignment);

    // Compute how big eden can be, then adjust end.
    // See comments above on calculating eden_end.
    size_t eden_size;
    if (maintain_minimum) {
      eden_size = pointer_delta(to_start, eden_start, sizeof(char));
    } else {
      eden_size = MIN2(requested_eden_size,
                       pointer_delta(to_start, eden_start, sizeof(char)));
    }
    eden_end = eden_start + eden_size;
    assert(eden_end >= eden_start, "addition overflowed");

    // Don't let eden shrink down to 0 or less.
    eden_end = MAX2(eden_end, eden_start + SpaceAlignment);
    to_start = MAX2(to_start, eden_end);

    log_trace(gc, ergo)(" [eden_start .. eden_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(eden_start),
                        p2i(eden_end),
                        pointer_delta(eden_end, eden_start, sizeof(char)));
    log_trace(gc, ergo)(" [ to_start .. to_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(to_start),
                        p2i(to_end),
                        pointer_delta( to_end, to_start, sizeof(char)));
    log_trace(gc, ergo)(" [from_start .. from_end): "
                        "[" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT,
                        p2i(from_start),
                        p2i(from_end),
                        pointer_delta(from_end, from_start, sizeof(char)));
  }

  guarantee((HeapWord*)from_start <= from_space()->bottom(),
            "from start moved to the right");
  guarantee((HeapWord*)from_end >= from_space()->top(),
            "from end moved into live data");
  assert(is_object_aligned(eden_start), "checking alignment");
  assert(is_object_aligned(from_start), "checking alignment");
  assert(is_object_aligned(to_start), "checking alignment");

  MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end);
  MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end);
  MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end);

  // Let's make sure the call to initialize doesn't reset "top"!
  DEBUG_ONLY(HeapWord* old_from_top = from_space()->top();)

  // For logging block below
  size_t old_from = from_space()->capacity_in_bytes();
  size_t old_to = to_space()->capacity_in_bytes();

  if (ZapUnusedHeapArea) {
    // NUMA is a special case: a NUMA space is not mangled so that its
    // addresses are not prematurely bound to the wrong memory (i.e., we
    // don't want the GC thread to be the first to touch the memory).
    // The survivor spaces are not NUMA spaces and are mangled.
    if (UseNUMA) {
      if (eden_from_to_order) {
        mangle_survivors(from_space(), fromMR, to_space(), toMR);
      } else {
        mangle_survivors(to_space(), toMR, from_space(), fromMR);
      }
    }

    // If not mangling the spaces, verify that they are already mangled:
    // the spaces should be correctly mangled at this point (note that
    // they are not mangled by the calls to initialize() below).
    // The check must be done before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.

    HeapWord* limit = (HeapWord*) virtual_space()->high();
    eden_space()->check_mangled_unused_area(limit);
    from_space()->check_mangled_unused_area(limit);
    to_space()->check_mangled_unused_area(limit);
  }
  // When an existing space is being initialized, it is not
  // mangled because the space has been previously mangled.
  eden_space()->initialize(edenMR,
                           SpaceDecorator::Clear,
                           SpaceDecorator::DontMangle);
  to_space()->initialize(toMR,
                         SpaceDecorator::Clear,
                         SpaceDecorator::DontMangle);
  from_space()->initialize(fromMR,
                           SpaceDecorator::DontClear,
                           SpaceDecorator::DontMangle);

  PSScavenge::set_young_generation_boundary(eden_space()->bottom());

  assert(from_space()->top() == old_from_top, "from top changed!");

  log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: "
                      "collection: %d "
                      "(" SIZE_FORMAT ", " SIZE_FORMAT ") -> "
                      "(" SIZE_FORMAT ", " SIZE_FORMAT ") ",
                      ParallelScavengeHeap::heap()->total_collections(),
                      old_from, old_to,
                      from_space()->capacity_in_bytes(),
                      to_space()->capacity_in_bytes());

  space_invariants();
}

void ASPSYoungGen::reset_after_change() {
  assert_locked_or_safepoint(Heap_lock);

  _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(),
                        (HeapWord*)virtual_space()->high_boundary());
  PSScavenge::set_subject_to_discovery_span(_reserved);

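  // Eden always starts at the low end of the committed virtual space
  // (see resize_spaces()), so if that address has changed, reinitialize
  // eden and reset the young generation boundary used by the scavenger.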
  HeapWord* new_eden_bottom = (HeapWord*)virtual_space()->low();
  HeapWord* eden_bottom = eden_space()->bottom();
  if (new_eden_bottom != eden_bottom) {
    MemRegion eden_mr(new_eden_bottom, eden_space()->end());
    eden_space()->initialize(eden_mr,
                             SpaceDecorator::Clear,
                             SpaceDecorator::Mangle);
    PSScavenge::set_young_generation_boundary(eden_space()->bottom());
  }
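  // Resize the card table so that it covers the new committed region of
  // the young gen.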
  MemRegion cmr((HeapWord*)virtual_space()->low(),
                (HeapWord*)virtual_space()->high());
  ParallelScavengeHeap::heap()->barrier_set()->card_table()->resize_covered_region(cmr);

  space_invariants();
}