1 | /* |
2 | * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "gc/parallel/mutableNUMASpace.hpp" |
27 | #include "gc/parallel/parallelScavengeHeap.hpp" |
28 | #include "gc/parallel/psMarkSweepDecorator.hpp" |
29 | #include "gc/parallel/psScavenge.hpp" |
30 | #include "gc/parallel/psYoungGen.hpp" |
31 | #include "gc/shared/gcUtil.hpp" |
32 | #include "gc/shared/genArguments.hpp" |
33 | #include "gc/shared/spaceDecorator.hpp" |
34 | #include "logging/log.hpp" |
35 | #include "oops/oop.inline.hpp" |
36 | #include "runtime/java.hpp" |
37 | #include "utilities/align.hpp" |
38 | |
39 | PSYoungGen::PSYoungGen(size_t initial_size, size_t min_size, size_t max_size) : |
40 | _reserved(), |
41 | _virtual_space(NULL), |
42 | _eden_space(NULL), |
43 | _from_space(NULL), |
44 | _to_space(NULL), |
45 | _eden_mark_sweep(NULL), |
46 | _from_mark_sweep(NULL), |
47 | _to_mark_sweep(NULL), |
48 | _init_gen_size(initial_size), |
49 | _min_gen_size(min_size), |
50 | _max_gen_size(max_size), |
51 | _gen_counters(NULL), |
52 | _eden_counters(NULL), |
53 | _from_counters(NULL), |
54 | _to_counters(NULL) |
55 | {} |
56 | |
57 | void PSYoungGen::initialize_virtual_space(ReservedSpace rs, size_t alignment) { |
58 | assert(_init_gen_size != 0, "Should have a finite size" ); |
59 | _virtual_space = new PSVirtualSpace(rs, alignment); |
60 | if (!virtual_space()->expand_by(_init_gen_size)) { |
61 | vm_exit_during_initialization("Could not reserve enough space for " |
62 | "object heap" ); |
63 | } |
64 | } |
65 | |
66 | void PSYoungGen::initialize(ReservedSpace rs, size_t alignment) { |
67 | initialize_virtual_space(rs, alignment); |
68 | initialize_work(); |
69 | } |
70 | |
71 | void PSYoungGen::initialize_work() { |
72 | |
73 | _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), |
74 | (HeapWord*)virtual_space()->high_boundary()); |
75 | |
76 | MemRegion cmr((HeapWord*)virtual_space()->low(), |
77 | (HeapWord*)virtual_space()->high()); |
78 | ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr); |
79 | |
80 | if (ZapUnusedHeapArea) { |
    // Mangle newly committed space immediately because it
    // can be done here more simply than after the new
    // spaces have been computed.
84 | SpaceMangler::mangle_region(cmr); |
85 | } |
86 | |
87 | if (UseNUMA) { |
88 | _eden_space = new MutableNUMASpace(virtual_space()->alignment()); |
89 | } else { |
90 | _eden_space = new MutableSpace(virtual_space()->alignment()); |
91 | } |
92 | _from_space = new MutableSpace(virtual_space()->alignment()); |
93 | _to_space = new MutableSpace(virtual_space()->alignment()); |
94 | |
95 | if (_eden_space == NULL || _from_space == NULL || _to_space == NULL) { |
96 | vm_exit_during_initialization("Could not allocate a young gen space" ); |
97 | } |
98 | |
99 | // Allocate the mark sweep views of spaces |
100 | _eden_mark_sweep = |
101 | new PSMarkSweepDecorator(_eden_space, NULL, MarkSweepDeadRatio); |
102 | _from_mark_sweep = |
103 | new PSMarkSweepDecorator(_from_space, NULL, MarkSweepDeadRatio); |
104 | _to_mark_sweep = |
105 | new PSMarkSweepDecorator(_to_space, NULL, MarkSweepDeadRatio); |
106 | |
107 | if (_eden_mark_sweep == NULL || |
108 | _from_mark_sweep == NULL || |
109 | _to_mark_sweep == NULL) { |
110 | vm_exit_during_initialization("Could not complete allocation" |
111 | " of the young generation" ); |
112 | } |
113 | |
114 | // Generation Counters - generation 0, 3 subspaces |
115 | _gen_counters = new PSGenerationCounters("new" , 0, 3, _min_gen_size, |
116 | _max_gen_size, _virtual_space); |
117 | |
118 | // Compute maximum space sizes for performance counters |
119 | size_t alignment = SpaceAlignment; |
120 | size_t size = virtual_space()->reserved_size(); |
121 | |
122 | size_t max_survivor_size; |
123 | size_t max_eden_size; |
124 | |
125 | if (UseAdaptiveSizePolicy) { |
126 | max_survivor_size = size / MinSurvivorRatio; |
127 | |
128 | // round the survivor space size down to the nearest alignment |
129 | // and make sure its size is greater than 0. |
130 | max_survivor_size = align_down(max_survivor_size, alignment); |
131 | max_survivor_size = MAX2(max_survivor_size, alignment); |
132 | |
133 | // set the maximum size of eden to be the size of the young gen |
134 | // less two times the minimum survivor size. The minimum survivor |
135 | // size for UseAdaptiveSizePolicy is one alignment. |
136 | max_eden_size = size - 2 * alignment; |
137 | } else { |
138 | max_survivor_size = size / InitialSurvivorRatio; |
139 | |
140 | // round the survivor space size down to the nearest alignment |
141 | // and make sure its size is greater than 0. |
142 | max_survivor_size = align_down(max_survivor_size, alignment); |
143 | max_survivor_size = MAX2(max_survivor_size, alignment); |
144 | |
145 | // set the maximum size of eden to be the size of the young gen |
146 | // less two times the survivor size when the generation is 100% |
147 | // committed. The minimum survivor size for -UseAdaptiveSizePolicy |
148 | // is dependent on the committed portion (current capacity) of the |
149 | // generation - the less space committed, the smaller the survivor |
150 | // space, possibly as small as an alignment. However, we are interested |
151 | // in the case where the young generation is 100% committed, as this |
152 | // is the point where eden reaches its maximum size. At this point, |
153 | // the size of a survivor space is max_survivor_size. |
154 | max_eden_size = size - 2 * max_survivor_size; |
155 | } |
156 | |
157 | _eden_counters = new SpaceCounters("eden" , 0, max_eden_size, _eden_space, |
158 | _gen_counters); |
159 | _from_counters = new SpaceCounters("s0" , 1, max_survivor_size, _from_space, |
160 | _gen_counters); |
161 | _to_counters = new SpaceCounters("s1" , 2, max_survivor_size, _to_space, |
162 | _gen_counters); |
163 | |
164 | compute_initial_space_boundaries(); |
165 | } |
166 | |
167 | void PSYoungGen::compute_initial_space_boundaries() { |
168 | // Compute sizes |
169 | size_t size = virtual_space()->committed_size(); |
170 | assert(size >= 3 * SpaceAlignment, "Young space is not large enough for eden + 2 survivors" ); |
171 | |
172 | size_t survivor_size = size / InitialSurvivorRatio; |
173 | survivor_size = align_down(survivor_size, SpaceAlignment); |
174 | // ... but never less than an alignment |
175 | survivor_size = MAX2(survivor_size, SpaceAlignment); |
176 | |
177 | // Young generation is eden + 2 survivor spaces |
178 | size_t eden_size = size - (2 * survivor_size); |
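
  // Illustrative example (assumed values): with a committed size of 24M,
  // InitialSurvivorRatio = 8 and SpaceAlignment = 512K,
  //   survivor_size = align_down(24M / 8, 512K) = 3M
  //   eden_size     = 24M - 2 * 3M               = 18M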
179 | |
180 | // Now go ahead and set 'em. |
181 | set_space_boundaries(eden_size, survivor_size); |
182 | space_invariants(); |
183 | |
184 | if (UsePerfData) { |
185 | _eden_counters->update_capacity(); |
186 | _from_counters->update_capacity(); |
187 | _to_counters->update_capacity(); |
188 | } |
189 | } |
190 | |
191 | void PSYoungGen::set_space_boundaries(size_t eden_size, size_t survivor_size) { |
192 | assert(eden_size < virtual_space()->committed_size(), "just checking" ); |
193 | assert(eden_size > 0 && survivor_size > 0, "just checking" ); |
194 | |
  // Initial layout is Eden, to, from. After swapping survivor spaces,
  // that leaves us with Eden, from, to, which is step one in our two-step
  // resize-with-live-data procedure.
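  //
  // Illustrative picture of the resulting initial layout (not to scale):
  //
  //   low()                                                       high()
  //     |____________eden____________|_____to_____|_____from_____|
  //   eden_start                  to_start     from_start     from_end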
198 | char *eden_start = virtual_space()->low(); |
199 | char *to_start = eden_start + eden_size; |
200 | char *from_start = to_start + survivor_size; |
201 | char *from_end = from_start + survivor_size; |
202 | |
203 | assert(from_end == virtual_space()->high(), "just checking" ); |
204 | assert(is_object_aligned(eden_start), "checking alignment" ); |
205 | assert(is_object_aligned(to_start), "checking alignment" ); |
206 | assert(is_object_aligned(from_start), "checking alignment" ); |
207 | |
208 | MemRegion eden_mr((HeapWord*)eden_start, (HeapWord*)to_start); |
209 | MemRegion to_mr ((HeapWord*)to_start, (HeapWord*)from_start); |
210 | MemRegion from_mr((HeapWord*)from_start, (HeapWord*)from_end); |
211 | |
212 | eden_space()->initialize(eden_mr, true, ZapUnusedHeapArea); |
213 | to_space()->initialize(to_mr , true, ZapUnusedHeapArea); |
214 | from_space()->initialize(from_mr, true, ZapUnusedHeapArea); |
215 | } |
216 | |
217 | #ifndef PRODUCT |
218 | void PSYoungGen::space_invariants() { |
219 | // Currently, our eden size cannot shrink to zero |
220 | guarantee(eden_space()->capacity_in_bytes() >= SpaceAlignment, "eden too small" ); |
221 | guarantee(from_space()->capacity_in_bytes() >= SpaceAlignment, "from too small" ); |
222 | guarantee(to_space()->capacity_in_bytes() >= SpaceAlignment, "to too small" ); |
223 | |
224 | // Relationship of spaces to each other |
225 | char* eden_start = (char*)eden_space()->bottom(); |
226 | char* eden_end = (char*)eden_space()->end(); |
227 | char* from_start = (char*)from_space()->bottom(); |
228 | char* from_end = (char*)from_space()->end(); |
229 | char* to_start = (char*)to_space()->bottom(); |
230 | char* to_end = (char*)to_space()->end(); |
231 | |
232 | guarantee(eden_start >= virtual_space()->low(), "eden bottom" ); |
233 | guarantee(eden_start < eden_end, "eden space consistency" ); |
234 | guarantee(from_start < from_end, "from space consistency" ); |
235 | guarantee(to_start < to_end, "to space consistency" ); |
236 | |
237 | // Check whether from space is below to space |
238 | if (from_start < to_start) { |
239 | // Eden, from, to |
240 | guarantee(eden_end <= from_start, "eden/from boundary" ); |
241 | guarantee(from_end <= to_start, "from/to boundary" ); |
242 | guarantee(to_end <= virtual_space()->high(), "to end" ); |
243 | } else { |
244 | // Eden, to, from |
245 | guarantee(eden_end <= to_start, "eden/to boundary" ); |
246 | guarantee(to_end <= from_start, "to/from boundary" ); |
247 | guarantee(from_end <= virtual_space()->high(), "from end" ); |
248 | } |
249 | |
250 | // More checks that the virtual space is consistent with the spaces |
251 | assert(virtual_space()->committed_size() >= |
252 | (eden_space()->capacity_in_bytes() + |
253 | to_space()->capacity_in_bytes() + |
254 | from_space()->capacity_in_bytes()), "Committed size is inconsistent" ); |
255 | assert(virtual_space()->committed_size() <= virtual_space()->reserved_size(), |
256 | "Space invariant" ); |
257 | char* eden_top = (char*)eden_space()->top(); |
258 | char* from_top = (char*)from_space()->top(); |
259 | char* to_top = (char*)to_space()->top(); |
260 | assert(eden_top <= virtual_space()->high(), "eden top" ); |
261 | assert(from_top <= virtual_space()->high(), "from top" ); |
262 | assert(to_top <= virtual_space()->high(), "to top" ); |
263 | |
264 | virtual_space()->verify(); |
265 | } |
266 | #endif |
267 | |
268 | void PSYoungGen::resize(size_t eden_size, size_t survivor_size) { |
269 | // Resize the generation if needed. If the generation resize |
270 | // reports false, do not attempt to resize the spaces. |
271 | if (resize_generation(eden_size, survivor_size)) { |
272 | // Then we lay out the spaces inside the generation |
273 | resize_spaces(eden_size, survivor_size); |
274 | |
275 | space_invariants(); |
276 | |
277 | log_trace(gc, ergo)("Young generation size: " |
278 | "desired eden: " SIZE_FORMAT " survivor: " SIZE_FORMAT |
279 | " used: " SIZE_FORMAT " capacity: " SIZE_FORMAT |
280 | " gen limits: " SIZE_FORMAT " / " SIZE_FORMAT, |
281 | eden_size, survivor_size, used_in_bytes(), capacity_in_bytes(), |
282 | _max_gen_size, min_gen_size()); |
283 | } |
284 | } |
285 | |
286 | |
287 | bool PSYoungGen::resize_generation(size_t eden_size, size_t survivor_size) { |
288 | const size_t alignment = virtual_space()->alignment(); |
289 | size_t orig_size = virtual_space()->committed_size(); |
290 | bool size_changed = false; |
291 | |
  // There used to be a guarantee here:
  // guarantee ((eden_size + 2*survivor_size) <= _max_gen_size, "incorrect input arguments");
  // The code below enforces this requirement. In addition, the desired eden
  // and survivor sizes are goals and may exceed the total generation size.
297 | |
298 | assert(min_gen_size() <= orig_size && orig_size <= max_size(), "just checking" ); |
299 | |
300 | // Adjust new generation size |
301 | const size_t eden_plus_survivors = |
302 | align_up(eden_size + 2 * survivor_size, alignment); |
303 | size_t desired_size = MAX2(MIN2(eden_plus_survivors, max_size()), |
304 | min_gen_size()); |
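  // Illustrative example (assumed values): with eden_size = 20M,
  // survivor_size = 3M and an alignment of 512K,
  // eden_plus_survivors = align_up(26M, 512K) = 26M, which is then
  // clamped into the range [min_gen_size(), max_size()].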
305 | assert(desired_size <= max_size(), "just checking" ); |
306 | |
307 | if (desired_size > orig_size) { |
308 | // Grow the generation |
309 | size_t change = desired_size - orig_size; |
310 | assert(change % alignment == 0, "just checking" ); |
311 | HeapWord* prev_high = (HeapWord*) virtual_space()->high(); |
312 | if (!virtual_space()->expand_by(change)) { |
313 | return false; // Error if we fail to resize! |
314 | } |
315 | if (ZapUnusedHeapArea) { |
      // Mangle newly committed space immediately because it
      // can be done here more simply than after the new
      // spaces have been computed.
319 | HeapWord* new_high = (HeapWord*) virtual_space()->high(); |
320 | MemRegion mangle_region(prev_high, new_high); |
321 | SpaceMangler::mangle_region(mangle_region); |
322 | } |
323 | size_changed = true; |
324 | } else if (desired_size < orig_size) { |
325 | size_t desired_change = orig_size - desired_size; |
326 | assert(desired_change % alignment == 0, "just checking" ); |
327 | |
328 | desired_change = limit_gen_shrink(desired_change); |
329 | |
330 | if (desired_change > 0) { |
331 | virtual_space()->shrink_by(desired_change); |
332 | reset_survivors_after_shrink(); |
333 | |
334 | size_changed = true; |
335 | } |
336 | } else { |
337 | if (orig_size == gen_size_limit()) { |
338 | log_trace(gc)("PSYoung generation size at maximum: " SIZE_FORMAT "K" , orig_size/K); |
339 | } else if (orig_size == min_gen_size()) { |
340 | log_trace(gc)("PSYoung generation size at minium: " SIZE_FORMAT "K" , orig_size/K); |
341 | } |
342 | } |
343 | |
344 | if (size_changed) { |
345 | post_resize(); |
346 | log_trace(gc)("PSYoung generation size changed: " SIZE_FORMAT "K->" SIZE_FORMAT "K" , |
347 | orig_size/K, virtual_space()->committed_size()/K); |
348 | } |
349 | |
350 | guarantee(eden_plus_survivors <= virtual_space()->committed_size() || |
351 | virtual_space()->committed_size() == max_size(), "Sanity" ); |
352 | |
353 | return true; |
354 | } |
355 | |
356 | #ifndef PRODUCT |
// In the numa case eden is not mangled so a survivor space
// moving into a region previously occupied by a survivor
// may find an unmangled region. Also in the PS case eden,
// to-space and from-space may not touch (i.e., there may be
// gaps between them due to movement while resizing the
// spaces). Those gaps must be mangled.
363 | void PSYoungGen::mangle_survivors(MutableSpace* s1, |
364 | MemRegion s1MR, |
365 | MutableSpace* s2, |
366 | MemRegion s2MR) { |
367 | // Check eden and gap between eden and from-space, in deciding |
368 | // what to mangle in from-space. Check the gap between from-space |
369 | // and to-space when deciding what to mangle. |
370 | // |
371 | // +--------+ +----+ +---+ |
372 | // | eden | |s1 | |s2 | |
373 | // +--------+ +----+ +---+ |
374 | // +-------+ +-----+ |
375 | // |s1MR | |s2MR | |
376 | // +-------+ +-----+ |
377 | // All of survivor-space is properly mangled so find the |
378 | // upper bound on the mangling for any portion above current s1. |
379 | HeapWord* delta_end = MIN2(s1->bottom(), s1MR.end()); |
380 | MemRegion delta1_left; |
381 | if (s1MR.start() < delta_end) { |
382 | delta1_left = MemRegion(s1MR.start(), delta_end); |
383 | s1->mangle_region(delta1_left); |
384 | } |
385 | // Find any portion to the right of the current s1. |
386 | HeapWord* delta_start = MAX2(s1->end(), s1MR.start()); |
387 | MemRegion delta1_right; |
388 | if (delta_start < s1MR.end()) { |
389 | delta1_right = MemRegion(delta_start, s1MR.end()); |
390 | s1->mangle_region(delta1_right); |
391 | } |
392 | |
393 | // Similarly for the second survivor space except that |
394 | // any of the new region that overlaps with the current |
395 | // region of the first survivor space has already been |
396 | // mangled. |
397 | delta_end = MIN2(s2->bottom(), s2MR.end()); |
398 | delta_start = MAX2(s2MR.start(), s1->end()); |
399 | MemRegion delta2_left; |
400 | if (s2MR.start() < delta_end) { |
401 | delta2_left = MemRegion(s2MR.start(), delta_end); |
402 | s2->mangle_region(delta2_left); |
403 | } |
  // Find any portion to the right of the current s2.
  delta_start = MAX2(s2->end(), s2MR.start());
  MemRegion delta2_right;
  if (delta_start < s2MR.end()) {
    delta2_right = MemRegion(delta_start, s2MR.end());
    s2->mangle_region(delta2_right);
  }
409 | |
410 | // s1 |
411 | log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") " |
412 | "New region: [" PTR_FORMAT ", " PTR_FORMAT ")" , |
413 | p2i(s1->bottom()), p2i(s1->end()), |
414 | p2i(s1MR.start()), p2i(s1MR.end())); |
415 | log_develop_trace(gc)(" Mangle before: [" PTR_FORMAT ", " |
416 | PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")" , |
417 | p2i(delta1_left.start()), p2i(delta1_left.end()), |
418 | p2i(delta1_right.start()), p2i(delta1_right.end())); |
419 | |
420 | // s2 |
421 | log_develop_trace(gc)("Current region: [" PTR_FORMAT ", " PTR_FORMAT ") " |
422 | "New region: [" PTR_FORMAT ", " PTR_FORMAT ")" , |
423 | p2i(s2->bottom()), p2i(s2->end()), |
424 | p2i(s2MR.start()), p2i(s2MR.end())); |
425 | log_develop_trace(gc)(" Mangle before: [" PTR_FORMAT ", " |
426 | PTR_FORMAT ") Mangle after: [" PTR_FORMAT ", " PTR_FORMAT ")" , |
427 | p2i(delta2_left.start()), p2i(delta2_left.end()), |
428 | p2i(delta2_right.start()), p2i(delta2_right.end())); |
429 | } |
430 | #endif // NOT PRODUCT |
431 | |
432 | void PSYoungGen::resize_spaces(size_t requested_eden_size, |
433 | size_t requested_survivor_size) { |
434 | assert(UseAdaptiveSizePolicy, "sanity check" ); |
435 | assert(requested_eden_size > 0 && requested_survivor_size > 0, |
436 | "just checking" ); |
437 | |
438 | // We require eden and to space to be empty |
439 | if ((!eden_space()->is_empty()) || (!to_space()->is_empty())) { |
440 | return; |
441 | } |
442 | |
443 | log_trace(gc, ergo)("PSYoungGen::resize_spaces(requested_eden_size: " SIZE_FORMAT ", requested_survivor_size: " SIZE_FORMAT ")" , |
444 | requested_eden_size, requested_survivor_size); |
445 | log_trace(gc, ergo)(" eden: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT, |
446 | p2i(eden_space()->bottom()), |
447 | p2i(eden_space()->end()), |
448 | pointer_delta(eden_space()->end(), |
449 | eden_space()->bottom(), |
450 | sizeof(char))); |
451 | log_trace(gc, ergo)(" from: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT, |
452 | p2i(from_space()->bottom()), |
453 | p2i(from_space()->end()), |
454 | pointer_delta(from_space()->end(), |
455 | from_space()->bottom(), |
456 | sizeof(char))); |
457 | log_trace(gc, ergo)(" to: [" PTR_FORMAT ".." PTR_FORMAT ") " SIZE_FORMAT, |
458 | p2i(to_space()->bottom()), |
459 | p2i(to_space()->end()), |
460 | pointer_delta( to_space()->end(), |
461 | to_space()->bottom(), |
462 | sizeof(char))); |
463 | |
464 | // There's nothing to do if the new sizes are the same as the current |
465 | if (requested_survivor_size == to_space()->capacity_in_bytes() && |
466 | requested_survivor_size == from_space()->capacity_in_bytes() && |
467 | requested_eden_size == eden_space()->capacity_in_bytes()) { |
468 | log_trace(gc, ergo)(" capacities are the right sizes, returning" ); |
469 | return; |
470 | } |
471 | |
472 | char* eden_start = (char*)eden_space()->bottom(); |
473 | char* eden_end = (char*)eden_space()->end(); |
474 | char* from_start = (char*)from_space()->bottom(); |
475 | char* from_end = (char*)from_space()->end(); |
476 | char* to_start = (char*)to_space()->bottom(); |
477 | char* to_end = (char*)to_space()->end(); |
478 | |
479 | const bool maintain_minimum = |
480 | (requested_eden_size + 2 * requested_survivor_size) <= min_gen_size(); |
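  // Illustrative example (assumed values): with min_gen_size() = 16M,
  // requested_eden_size = 10M and requested_survivor_size = 2M,
  // 10M + 2 * 2M = 14M <= 16M, so maintain_minimum is true and eden
  // may be made larger than requested to preserve the minimum gen size.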
481 | |
482 | bool eden_from_to_order = from_start < to_start; |
483 | // Check whether from space is below to space |
  if (eden_from_to_order) {
    // Eden, from, to
    log_trace(gc, ergo)("  Eden, from, to:");
488 | |
489 | // Set eden |
490 | // "requested_eden_size" is a goal for the size of eden |
491 | // and may not be attainable. "eden_size" below is |
492 | // calculated based on the location of from-space and |
493 | // the goal for the size of eden. from-space is |
494 | // fixed in place because it contains live data. |
    // The calculation is done this way to avoid 32-bit
    // overflow (i.e., eden_start + requested_eden_size
    // may be too large to represent in 32 bits).
498 | size_t eden_size; |
499 | if (maintain_minimum) { |
500 | // Only make eden larger than the requested size if |
501 | // the minimum size of the generation has to be maintained. |
502 | // This could be done in general but policy at a higher |
503 | // level is determining a requested size for eden and that |
504 | // should be honored unless there is a fundamental reason. |
505 | eden_size = pointer_delta(from_start, |
506 | eden_start, |
507 | sizeof(char)); |
508 | } else { |
509 | eden_size = MIN2(requested_eden_size, |
510 | pointer_delta(from_start, eden_start, sizeof(char))); |
511 | } |
512 | |
513 | eden_end = eden_start + eden_size; |
514 | assert(eden_end >= eden_start, "addition overflowed" ); |
515 | |
516 | // To may resize into from space as long as it is clear of live data. |
517 | // From space must remain page aligned, though, so we need to do some |
518 | // extra calculations. |
519 | |
520 | // First calculate an optimal to-space |
521 | to_end = (char*)virtual_space()->high(); |
522 | to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, |
523 | sizeof(char)); |
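    // Illustrative example (assumed addresses): with high() at 0x76000000
    // and requested_survivor_size = 4M, the optimal to-space would be
    // [0x75c00000, 0x76000000); it may be pushed to the right below if it
    // overlaps the live portion of from-space.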
524 | |
525 | // Does the optimal to-space overlap from-space? |
526 | if (to_start < (char*)from_space()->end()) { |
527 | // Calculate the minimum offset possible for from_end |
528 | size_t from_size = pointer_delta(from_space()->top(), from_start, sizeof(char)); |
529 | |
530 | // Should we be in this method if from_space is empty? Why not the set_space method? FIX ME! |
531 | if (from_size == 0) { |
532 | from_size = SpaceAlignment; |
533 | } else { |
534 | from_size = align_up(from_size, SpaceAlignment); |
535 | } |
536 | |
537 | from_end = from_start + from_size; |
538 | assert(from_end > from_start, "addition overflow or from_size problem" ); |
539 | |
540 | guarantee(from_end <= (char*)from_space()->end(), "from_end moved to the right" ); |
541 | |
542 | // Now update to_start with the new from_end |
543 | to_start = MAX2(from_end, to_start); |
544 | } |
545 | |
546 | guarantee(to_start != to_end, "to space is zero sized" ); |
547 | |
548 | log_trace(gc, ergo)(" [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, |
549 | p2i(eden_start), |
550 | p2i(eden_end), |
551 | pointer_delta(eden_end, eden_start, sizeof(char))); |
552 | log_trace(gc, ergo)(" [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, |
553 | p2i(from_start), |
554 | p2i(from_end), |
555 | pointer_delta(from_end, from_start, sizeof(char))); |
556 | log_trace(gc, ergo)(" [ to_start .. to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, |
557 | p2i(to_start), |
558 | p2i(to_end), |
559 | pointer_delta( to_end, to_start, sizeof(char))); |
560 | } else { |
561 | // Eden, to, from |
562 | log_trace(gc, ergo)(" Eden, to, from:" ); |
563 | |
564 | // To space gets priority over eden resizing. Note that we position |
565 | // to space as if we were able to resize from space, even though from |
566 | // space is not modified. |
567 | // Giving eden priority was tried and gave poorer performance. |
568 | to_end = (char*)pointer_delta(virtual_space()->high(), |
569 | (char*)requested_survivor_size, |
570 | sizeof(char)); |
571 | to_end = MIN2(to_end, from_start); |
572 | to_start = (char*)pointer_delta(to_end, (char*)requested_survivor_size, |
573 | sizeof(char)); |
    // If the space sizes are to be increased by several times then
    // 'to_start' will point before the start of the young generation.
    // In this case 'to_start' must be adjusted upwards.
577 | to_start = MAX2(to_start, eden_start + SpaceAlignment); |
578 | |
579 | // Compute how big eden can be, then adjust end. |
580 | // See comments above on calculating eden_end. |
581 | size_t eden_size; |
582 | if (maintain_minimum) { |
583 | eden_size = pointer_delta(to_start, eden_start, sizeof(char)); |
584 | } else { |
585 | eden_size = MIN2(requested_eden_size, |
586 | pointer_delta(to_start, eden_start, sizeof(char))); |
587 | } |
588 | eden_end = eden_start + eden_size; |
589 | assert(eden_end >= eden_start, "addition overflowed" ); |
590 | |
591 | // Could choose to not let eden shrink |
592 | // to_start = MAX2(to_start, eden_end); |
593 | |
594 | // Don't let eden shrink down to 0 or less. |
595 | eden_end = MAX2(eden_end, eden_start + SpaceAlignment); |
596 | to_start = MAX2(to_start, eden_end); |
597 | |
598 | log_trace(gc, ergo)(" [eden_start .. eden_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, |
599 | p2i(eden_start), |
600 | p2i(eden_end), |
601 | pointer_delta(eden_end, eden_start, sizeof(char))); |
602 | log_trace(gc, ergo)(" [ to_start .. to_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, |
603 | p2i(to_start), |
604 | p2i(to_end), |
605 | pointer_delta( to_end, to_start, sizeof(char))); |
606 | log_trace(gc, ergo)(" [from_start .. from_end): [" PTR_FORMAT " .. " PTR_FORMAT ") " SIZE_FORMAT, |
607 | p2i(from_start), |
608 | p2i(from_end), |
609 | pointer_delta(from_end, from_start, sizeof(char))); |
610 | } |
611 | |
612 | |
613 | guarantee((HeapWord*)from_start <= from_space()->bottom(), |
614 | "from start moved to the right" ); |
615 | guarantee((HeapWord*)from_end >= from_space()->top(), |
616 | "from end moved into live data" ); |
617 | assert(is_object_aligned(eden_start), "checking alignment" ); |
618 | assert(is_object_aligned(from_start), "checking alignment" ); |
619 | assert(is_object_aligned(to_start), "checking alignment" ); |
620 | |
621 | MemRegion edenMR((HeapWord*)eden_start, (HeapWord*)eden_end); |
622 | MemRegion toMR ((HeapWord*)to_start, (HeapWord*)to_end); |
623 | MemRegion fromMR((HeapWord*)from_start, (HeapWord*)from_end); |
624 | |
625 | // Let's make sure the call to initialize doesn't reset "top"! |
626 | HeapWord* old_from_top = from_space()->top(); |
627 | |
628 | // For logging block below |
629 | size_t old_from = from_space()->capacity_in_bytes(); |
630 | size_t old_to = to_space()->capacity_in_bytes(); |
631 | |
632 | if (ZapUnusedHeapArea) { |
    // NUMA is a special case because a numa space is not mangled
    // in order to not prematurely bind its address to the wrong
    // memory (i.e., we don't want the GC thread to be the first
    // to touch the memory). The survivor spaces are not numa
    // spaces and are mangled.
638 | if (UseNUMA) { |
639 | if (eden_from_to_order) { |
640 | mangle_survivors(from_space(), fromMR, to_space(), toMR); |
641 | } else { |
642 | mangle_survivors(to_space(), toMR, from_space(), fromMR); |
643 | } |
644 | } |
645 | |
    // The spaces should already be correctly mangled at this point, so
    // rather than mangling them again, verify here that they are mangled;
    // note that they are not mangled in the calls to initialize() below.
    // The mangling must be checked before the spaces are reshaped. Otherwise,
    // the bottom or end of one space may have moved into an area
    // covered by another space and a failure of the check may
    // not correctly indicate which space is not properly mangled.
655 | HeapWord* limit = (HeapWord*) virtual_space()->high(); |
656 | eden_space()->check_mangled_unused_area(limit); |
657 | from_space()->check_mangled_unused_area(limit); |
658 | to_space()->check_mangled_unused_area(limit); |
659 | } |
660 | // When an existing space is being initialized, it is not |
661 | // mangled because the space has been previously mangled. |
662 | eden_space()->initialize(edenMR, |
663 | SpaceDecorator::Clear, |
664 | SpaceDecorator::DontMangle); |
665 | to_space()->initialize(toMR, |
666 | SpaceDecorator::Clear, |
667 | SpaceDecorator::DontMangle); |
668 | from_space()->initialize(fromMR, |
669 | SpaceDecorator::DontClear, |
670 | SpaceDecorator::DontMangle); |
671 | |
672 | assert(from_space()->top() == old_from_top, "from top changed!" ); |
673 | |
674 | log_trace(gc, ergo)("AdaptiveSizePolicy::survivor space sizes: collection: %d (" SIZE_FORMAT ", " SIZE_FORMAT ") -> (" SIZE_FORMAT ", " SIZE_FORMAT ") " , |
675 | ParallelScavengeHeap::heap()->total_collections(), |
676 | old_from, old_to, |
677 | from_space()->capacity_in_bytes(), |
678 | to_space()->capacity_in_bytes()); |
679 | } |
680 | |
681 | void PSYoungGen::swap_spaces() { |
682 | MutableSpace* s = from_space(); |
683 | _from_space = to_space(); |
684 | _to_space = s; |
685 | |
686 | // Now update the decorators. |
687 | PSMarkSweepDecorator* md = from_mark_sweep(); |
688 | _from_mark_sweep = to_mark_sweep(); |
689 | _to_mark_sweep = md; |
690 | |
691 | assert(from_mark_sweep()->space() == from_space(), "Sanity" ); |
692 | assert(to_mark_sweep()->space() == to_space(), "Sanity" ); |
693 | } |
694 | |
695 | size_t PSYoungGen::capacity_in_bytes() const { |
696 | return eden_space()->capacity_in_bytes() |
697 | + from_space()->capacity_in_bytes(); // to_space() is only used during scavenge |
698 | } |
699 | |
700 | |
701 | size_t PSYoungGen::used_in_bytes() const { |
702 | return eden_space()->used_in_bytes() |
703 | + from_space()->used_in_bytes(); // to_space() is only used during scavenge |
704 | } |
705 | |
706 | |
707 | size_t PSYoungGen::free_in_bytes() const { |
708 | return eden_space()->free_in_bytes() |
709 | + from_space()->free_in_bytes(); // to_space() is only used during scavenge |
710 | } |
711 | |
712 | size_t PSYoungGen::capacity_in_words() const { |
713 | return eden_space()->capacity_in_words() |
714 | + from_space()->capacity_in_words(); // to_space() is only used during scavenge |
715 | } |
716 | |
717 | |
718 | size_t PSYoungGen::used_in_words() const { |
719 | return eden_space()->used_in_words() |
720 | + from_space()->used_in_words(); // to_space() is only used during scavenge |
721 | } |
722 | |
723 | |
724 | size_t PSYoungGen::free_in_words() const { |
725 | return eden_space()->free_in_words() |
726 | + from_space()->free_in_words(); // to_space() is only used during scavenge |
727 | } |
728 | |
729 | void PSYoungGen::object_iterate(ObjectClosure* blk) { |
730 | eden_space()->object_iterate(blk); |
731 | from_space()->object_iterate(blk); |
732 | to_space()->object_iterate(blk); |
733 | } |
734 | |
735 | #if INCLUDE_SERIALGC |
736 | |
737 | void PSYoungGen::precompact() { |
738 | eden_mark_sweep()->precompact(); |
739 | from_mark_sweep()->precompact(); |
740 | to_mark_sweep()->precompact(); |
741 | } |
742 | |
743 | void PSYoungGen::adjust_pointers() { |
744 | eden_mark_sweep()->adjust_pointers(); |
745 | from_mark_sweep()->adjust_pointers(); |
746 | to_mark_sweep()->adjust_pointers(); |
747 | } |
748 | |
749 | void PSYoungGen::compact() { |
750 | eden_mark_sweep()->compact(ZapUnusedHeapArea); |
751 | from_mark_sweep()->compact(ZapUnusedHeapArea); |
752 | // Mark sweep stores preserved markOops in to space, don't disturb! |
753 | to_mark_sweep()->compact(false); |
754 | } |
755 | |
756 | #endif // INCLUDE_SERIALGC |
757 | |
758 | void PSYoungGen::print() const { print_on(tty); } |
759 | void PSYoungGen::print_on(outputStream* st) const { |
760 | st->print(" %-15s" , "PSYoungGen" ); |
761 | st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K" , |
762 | capacity_in_bytes()/K, used_in_bytes()/K); |
763 | virtual_space()->print_space_boundaries_on(st); |
764 | st->print(" eden" ); eden_space()->print_on(st); |
765 | st->print(" from" ); from_space()->print_on(st); |
766 | st->print(" to " ); to_space()->print_on(st); |
767 | } |
768 | |
769 | // Note that a space is not printed before the [NAME: |
770 | void PSYoungGen::print_used_change(size_t prev_used) const { |
771 | log_info(gc, heap)("%s: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)" , |
772 | name(), prev_used / K, used_in_bytes() / K, capacity_in_bytes() / K); |
773 | } |
774 | |
775 | size_t PSYoungGen::available_for_expansion() { |
776 | ShouldNotReachHere(); |
777 | return 0; |
778 | } |
779 | |
780 | size_t PSYoungGen::available_for_contraction() { |
781 | ShouldNotReachHere(); |
782 | return 0; |
783 | } |
784 | |
785 | size_t PSYoungGen::available_to_min_gen() { |
786 | assert(virtual_space()->committed_size() >= min_gen_size(), "Invariant" ); |
787 | return virtual_space()->committed_size() - min_gen_size(); |
788 | } |
789 | |
// This method assumes that from-space has live data and that
// any shrinkage of the young gen is limited by the location of
// from-space.
793 | size_t PSYoungGen::available_to_live() { |
794 | size_t delta_in_survivor = 0; |
795 | MutableSpace* space_shrinking = NULL; |
796 | if (from_space()->end() > to_space()->end()) { |
797 | space_shrinking = from_space(); |
798 | } else { |
799 | space_shrinking = to_space(); |
800 | } |
801 | |
802 | // Include any space that is committed but not included in |
803 | // the survivor spaces. |
804 | assert(((HeapWord*)virtual_space()->high()) >= space_shrinking->end(), |
805 | "Survivor space beyond high end" ); |
806 | size_t unused_committed = pointer_delta(virtual_space()->high(), |
807 | space_shrinking->end(), sizeof(char)); |
808 | |
809 | if (space_shrinking->is_empty()) { |
810 | // Don't let the space shrink to 0 |
811 | assert(space_shrinking->capacity_in_bytes() >= SpaceAlignment, |
812 | "Space is too small" ); |
813 | delta_in_survivor = space_shrinking->capacity_in_bytes() - SpaceAlignment; |
814 | } else { |
815 | delta_in_survivor = pointer_delta(space_shrinking->end(), |
816 | space_shrinking->top(), |
817 | sizeof(char)); |
818 | } |
819 | |
820 | size_t delta_in_bytes = unused_committed + delta_in_survivor; |
821 | delta_in_bytes = align_down(delta_in_bytes, GenAlignment); |
822 | return delta_in_bytes; |
823 | } |
824 | |
825 | // Return the number of bytes available for resizing down the young |
826 | // generation. This is the minimum of |
827 | // input "bytes" |
828 | // bytes to the minimum young gen size |
829 | // bytes to the size currently being used + some small extra |
830 | size_t PSYoungGen::limit_gen_shrink(size_t bytes) { |
831 | // Allow shrinkage into the current eden but keep eden large enough |
832 | // to maintain the minimum young gen size |
833 | bytes = MIN3(bytes, available_to_min_gen(), available_to_live()); |
834 | return align_down(bytes, virtual_space()->alignment()); |
835 | } |
836 | |
837 | void PSYoungGen::reset_after_change() { |
838 | ShouldNotReachHere(); |
839 | } |
840 | |
841 | void PSYoungGen::reset_survivors_after_shrink() { |
842 | _reserved = MemRegion((HeapWord*)virtual_space()->low_boundary(), |
843 | (HeapWord*)virtual_space()->high_boundary()); |
844 | PSScavenge::set_subject_to_discovery_span(_reserved); |
845 | |
846 | MutableSpace* space_shrinking = NULL; |
847 | if (from_space()->end() > to_space()->end()) { |
848 | space_shrinking = from_space(); |
849 | } else { |
850 | space_shrinking = to_space(); |
851 | } |
852 | |
853 | HeapWord* new_end = (HeapWord*)virtual_space()->high(); |
854 | assert(new_end >= space_shrinking->bottom(), "Shrink was too large" ); |
855 | // Was there a shrink of the survivor space? |
856 | if (new_end < space_shrinking->end()) { |
857 | MemRegion mr(space_shrinking->bottom(), new_end); |
858 | space_shrinking->initialize(mr, |
859 | SpaceDecorator::DontClear, |
860 | SpaceDecorator::Mangle); |
861 | } |
862 | } |
863 | |
// This method currently does not expect to expand into eden (i.e.,
// the virtual space boundaries are expected to be consistent
// with the eden boundaries).
867 | void PSYoungGen::post_resize() { |
868 | assert_locked_or_safepoint(Heap_lock); |
869 | assert((eden_space()->bottom() < to_space()->bottom()) && |
870 | (eden_space()->bottom() < from_space()->bottom()), |
871 | "Eden is assumed to be below the survivor spaces" ); |
872 | |
873 | MemRegion cmr((HeapWord*)virtual_space()->low(), |
874 | (HeapWord*)virtual_space()->high()); |
875 | ParallelScavengeHeap::heap()->card_table()->resize_covered_region(cmr); |
876 | space_invariants(); |
877 | } |
878 | |
879 | |
880 | |
881 | void PSYoungGen::update_counters() { |
882 | if (UsePerfData) { |
883 | _eden_counters->update_all(); |
884 | _from_counters->update_all(); |
885 | _to_counters->update_all(); |
886 | _gen_counters->update_all(); |
887 | } |
888 | } |
889 | |
890 | void PSYoungGen::verify() { |
891 | eden_space()->verify(); |
892 | from_space()->verify(); |
893 | to_space()->verify(); |
894 | } |
895 | |
896 | #ifndef PRODUCT |
897 | void PSYoungGen::record_spaces_top() { |
898 | assert(ZapUnusedHeapArea, "Not mangling unused space" ); |
899 | eden_space()->set_top_for_allocations(); |
900 | from_space()->set_top_for_allocations(); |
901 | to_space()->set_top_for_allocations(); |
902 | } |
903 | #endif |
904 | |