/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/psMarkSweep.hpp"
#include "gc/parallel/psMarkSweepDecorator.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/serial/markSweep.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

PSMarkSweepDecorator* PSMarkSweepDecorator::_destination_decorator = NULL;


void PSMarkSweepDecorator::set_destination_decorator_tenured() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  _destination_decorator = heap->old_gen()->object_mark_sweep();
}

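// Compaction destinations are visited in a fixed order: the old gen first,
// then the young gen's eden, from-space and to-space. The chain is advanced
// (below) whenever the current destination cannot hold the next object being
// precompacted.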
void PSMarkSweepDecorator::advance_destination_decorator() {
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  assert(_destination_decorator != NULL, "Sanity");

  PSMarkSweepDecorator* first = heap->old_gen()->object_mark_sweep();
  PSMarkSweepDecorator* second = heap->young_gen()->eden_mark_sweep();
  PSMarkSweepDecorator* third = heap->young_gen()->from_mark_sweep();
  PSMarkSweepDecorator* fourth = heap->young_gen()->to_mark_sweep();

  if (_destination_decorator == first) {
    _destination_decorator = second;
  } else if (_destination_decorator == second) {
    _destination_decorator = third;
  } else if (_destination_decorator == third) {
    _destination_decorator = fourth;
  } else {
    fatal("PSMarkSweep attempting to advance past last compaction area");
  }
}

PSMarkSweepDecorator* PSMarkSweepDecorator::destination_decorator() {
  assert(_destination_decorator != NULL, "Sanity");

  return _destination_decorator;
}

// TODO: The object forwarding code below is duplicated between the live-object
// path and the deadspace-insertion path; factor it out.
//
// This method "precompacts" objects inside its space to dest. It places forwarding
// pointers into markOops for use by adjust_pointers. If "dest" should overflow, we
// finish by compacting into our own space.
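//
// Illustrative sketch (not a literal memory dump) of what this pass leaves
// behind in this space:
//
//   bottom                                                          top
//   | live | live | dead dead dead | live | dead dead | live | ...
//     ^ mark words now hold           ^ the first word of each dead run
//       forwarding pointers into        points at the next live object
//       the destination space(s)
//
// _end_of_live and _first_dead record the boundaries that adjust_pointers()
// and compact() rely on afterwards.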

void PSMarkSweepDecorator::precompact() {
  // Reset our own compact top.
  set_compaction_top(space()->bottom());

  /* We allow some amount of garbage towards the bottom of the space, so
   * we don't start compacting before there is a significant gain to be made.
   * Occasionally we want to ensure a full compaction, which is determined
   * by the MarkSweepAlwaysCompactCount parameter: every
   * MarkSweepAlwaysCompactCount-th invocation allows no dead space at all.
   * Tolerating some dead space in the dense prefix is a significant
   * performance improvement.
   */
  bool skip_dead = ((PSMarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0);

  size_t allowed_deadspace = 0;
  if (skip_dead) {
    const size_t ratio = allowed_dead_ratio();
    allowed_deadspace = space()->capacity_in_words() * ratio / 100;
  }
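  // For illustration only (hypothetical numbers): with a 100 MB space,
  // capacity_in_words() is about 13.1M words on a 64-bit VM, so an
  // allowed_dead_ratio() of 5 would permit roughly 655K words (~5 MB) of
  // dead space to be treated as live before this space starts compacting.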

  // Fetch the current destination decorator
  PSMarkSweepDecorator* dest = destination_decorator();
  ObjectStartArray* start_array = dest->start_array();

  HeapWord* compact_top = dest->compaction_top();
  HeapWord* compact_end = dest->space()->end();

  HeapWord* q = space()->bottom();
  HeapWord* t = space()->top();

  HeapWord* end_of_live = q;             /* One byte beyond the last byte of the last
                                            live object. */
  HeapWord* first_dead = space()->end(); /* The first dead object. */

  const intx interval = PrefetchScanIntervalInBytes;

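  // Single linear scan from bottom to top: live objects are assigned
  // forwarding addresses in dest (advancing to the next destination space as
  // each fills up), while dead runs are either absorbed into the dead-space
  // budget or chained together so the later phases can skip over them.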
  while (q < t) {
    assert(oop(q)->mark_raw()->is_marked() || oop(q)->mark_raw()->is_unlocked() ||
           oop(q)->mark_raw()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (oop(q)->is_gc_marked()) {
      /* prefetch beyond q */
      Prefetch::write(q, interval);
      size_t size = oop(q)->size();

      size_t compaction_max_size = pointer_delta(compact_end, compact_top);

      // This should only happen when compaction overflows from the old gen
      // into the young gen spaces. In that case the new destination's
      // start_array is NULL, because the young spaces are not covered by one.
      while (size > compaction_max_size) {
        // First record the last compact_top
        dest->set_compaction_top(compact_top);

        // Advance to the next compaction decorator
        advance_destination_decorator();
        dest = destination_decorator();

        // Update compaction info
        start_array = dest->start_array();
        compact_top = dest->compaction_top();
        compact_end = dest->space()->end();
        assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
        assert(compact_end > compact_top, "Must always be space remaining");
        compaction_max_size = pointer_delta(compact_end, compact_top);
      }

      // store the forwarding pointer into the mark word
      if (q != compact_top) {
        oop(q)->forward_to(oop(compact_top));
        assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
      } else {
        // if the object isn't moving we can just set the mark to the default
        // mark and handle it specially later on.
        oop(q)->init_mark_raw();
        assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
      }
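      // Encoding sketch (illustrative, not the exact implementation):
      // forward_to() stores the destination address in the mark word along
      // with the "marked" bit pattern, roughly equivalent to
      //
      //   oop(q)->set_mark_raw(markOopDesc::encode_pointer_as_mark(oop(compact_top)));
      //
      // so is_gc_marked() still answers true and forwardee() can decode
      // compact_top during the later phases.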

      // Update object start array
      if (start_array) {
        start_array->allocate_block(compact_top);
      }

      compact_top += size;
      assert(compact_top <= dest->space()->end(),
             "Exceeding space in destination");

      q += size;
      end_of_live = q;
    } else {
      /* run over all the contiguous dead objects */
      HeapWord* end = q;
      do {
        /* prefetch beyond end */
        Prefetch::write(end, interval);
        end += oop(end)->size();
      } while (end < t && (!oop(end)->is_gc_marked()));

      /* see if we might want to pretend this object is alive so that
       * we don't have to compact quite as often.
       */
      if (allowed_deadspace > 0 && q == compact_top) {
        size_t sz = pointer_delta(end, q);
        if (insert_deadspace(allowed_deadspace, q, sz)) {
          size_t compaction_max_size = pointer_delta(compact_end, compact_top);

          // This should only happen when compaction overflows from the old gen
          // into the young gen spaces. In that case the new destination's
          // start_array is NULL, because the young spaces are not covered by one.
          while (sz > compaction_max_size) {
            // First record the last compact_top
            dest->set_compaction_top(compact_top);

            // Advance to the next compaction decorator
            advance_destination_decorator();
            dest = destination_decorator();

            // Update compaction info
            start_array = dest->start_array();
            compact_top = dest->compaction_top();
            compact_end = dest->space()->end();
            assert(compact_top == dest->space()->bottom(), "Advanced to space already in use");
            assert(compact_end > compact_top, "Must always be space remaining");
            compaction_max_size = pointer_delta(compact_end, compact_top);
          }

          // store the forwarding pointer into the mark word
          if (q != compact_top) {
            oop(q)->forward_to(oop(compact_top));
            assert(oop(q)->is_gc_marked(), "encoding the pointer should preserve the mark");
          } else {
            // if the object isn't moving we can just set the mark to the default
            // mark and handle it specially later on.
            oop(q)->init_mark_raw();
            assert(oop(q)->forwardee() == NULL, "should be forwarded to NULL");
          }

          // Update object start array
          if (start_array) {
            start_array->allocate_block(compact_top);
          }

          compact_top += sz;
          assert(compact_top <= dest->space()->end(),
                 "Exceeding space in destination");

          q = end;
          end_of_live = end;
          continue;
        }
      }

      // q is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
      (*(HeapWord**)q) = end;
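      /* Illustration of the chain being built (addresses arbitrary): with a
       * dead run covering [q, end) and the next live object starting at end,
       * the first word of the run now acts as a skip link:
       *
       *   | live | dead dead dead | live | dead dead | live |
       *            `--> next live --^      `--> next --^
       *
       * adjust_pointers() and compact() follow these links to step over dead
       * memory without re-scanning it word by word.
       */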

      /* see if this is the first dead region. */
      if (q < first_dead) {
        first_dead = q;
      }

      /* move on to the next object */
      q = end;
    }
  }

  assert(q == t, "just checking");
  _end_of_live = end_of_live;
  if (end_of_live < first_dead) {
    first_dead = end_of_live;
  }
  _first_dead = first_dead;

  // Update compaction top
  dest->set_compaction_top(compact_top);
}

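// Treat the dead range [q, q + deadlength) as if it were live, charging it
// against the caller's dead-space budget: the range is formatted as a filler
// object so the heap stays parseable, and its mark is set so the rest of
// precompact() forwards it like any other live object. Once the budget is
// exceeded, the budget is zeroed and false is returned, so no further dead
// space is accepted in this space.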
bool PSMarkSweepDecorator::insert_deadspace(size_t& allowed_deadspace_words,
                                            HeapWord* q, size_t deadlength) {
  if (allowed_deadspace_words >= deadlength) {
    allowed_deadspace_words -= deadlength;
    CollectedHeap::fill_with_object(q, deadlength);
    oop(q)->set_mark_raw(oop(q)->mark_raw()->set_marked());
    assert((int) deadlength == oop(q)->size(), "bad filler object size");
    // Recall that we required "q == compaction_top".
    return true;
  } else {
    allowed_deadspace_words = 0;
    return false;
  }
}

void PSMarkSweepDecorator::adjust_pointers() {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* q = space()->bottom();
  HeapWord* t = _end_of_live;  // Established by precompact().

  assert(_first_dead <= _end_of_live, "Stands to reason, no?");

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've
    // reinitialized the mark word during the previous pass, so we can't
    // use is_gc_marked for the traversal.
    HeapWord* end = _first_dead;

    while (q < end) {
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      q += size;
    }

    if (_first_dead == t) {
      q = t;
    } else {
      // The first dead object should contain a pointer to the first live object
      q = *(HeapWord**)_first_dead;
    }
  }
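  // Main scan: from here up to _end_of_live, adjust the interior pointers of
  // each live object and hop over dead runs via the pointer that precompact()
  // left in their first word.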
  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_q = NULL);
  while (q < t) {
    // prefetch beyond q
    Prefetch::write(q, interval);
    if (oop(q)->is_gc_marked()) {
      // q is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(q));
      debug_only(prev_q = q);
      q += size;
    } else {
      debug_only(prev_q = q);
      // q is the start of a dead run, which is no longer an object. Its first
      // word holds a pointer to the next live object, stored by precompact().
      q = *(HeapWord**)q;
      assert(q > prev_q, "we should be moving forward through memory, q: " PTR_FORMAT ", prev_q: " PTR_FORMAT, p2i(q), p2i(prev_q));
    }
  }

  assert(q == t, "just checking");
}

void PSMarkSweepDecorator::compact(bool mangle_free_space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  HeapWord* q = space()->bottom();
  HeapWord* const t = _end_of_live;
  debug_only(HeapWord* prev_q = NULL);

  if (q < t && _first_dead > q &&
      !oop(q)->is_gc_marked()) {
#ifdef ASSERT
    // we have a chunk of the space which hasn't moved and we've reinitialized the
    // mark word during the previous pass, so we can't use is_gc_marked for the
    // traversal.
    HeapWord* const end = _first_dead;

    while (q < end) {
      size_t size = oop(q)->size();
      assert(!oop(q)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      debug_only(prev_q = q);
      q += size;
    }
#endif

    if (_first_dead == t) {
      q = t;
    } else {
      // The first word of the first dead object was overwritten in precompact()
      // with a raw pointer to the next live object; decode_pointer() recovers
      // it from the mark word.
      q = (HeapWord*) oop(_first_dead)->mark_raw()->decode_pointer();
    }
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;
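  // Main copy loop: every live object from here up to _end_of_live was given
  // a forwarding pointer by precompact(), so it is copied to its forwardee
  // and its mark word re-initialized. Dead runs are skipped by decoding the
  // next-live pointer out of their first word.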

  while (q < t) {
    if (!oop(q)->is_gc_marked()) {
      // mark is pointer to next marked oop
      debug_only(prev_q = q);
      q = (HeapWord*) oop(q)->mark_raw()->decode_pointer();
      assert(q > prev_q, "we should be moving forward through memory");
    } else {
      // prefetch beyond q
      Prefetch::read(q, scan_interval);

      // size and destination
      size_t size = oop(q)->size();
      HeapWord* compaction_top = (HeapWord*)oop(q)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(q != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(q, compaction_top, size);
      oop(compaction_top)->init_mark_raw();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_q = q);
      q += size;
    }
  }

  assert(compaction_top() >= space()->bottom() && compaction_top() <= space()->end(),
         "should point inside space");
  space()->set_top(compaction_top());

  if (mangle_free_space) {
    space()->mangle_unused_area();
  }
}