/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_SPACE_INLINE_HPP
#define SHARE_GC_SHARED_SPACE_INLINE_HPP

#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "oops/oopsHierarchy.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"
#include "runtime/safepoint.hpp"
#if INCLUDE_SERIALGC
#include "gc/serial/markSweep.inline.hpp"
#endif

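// Convenience wrapper: block-start lookup on a non-const Space simply
// forwards to block_start_const().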
inline HeapWord* Space::block_start(const void* p) {
  return block_start_const(p);
}

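// Allocate from the underlying contiguous space and, on success, record the
// new block in the block offset table so later block_start() queries find it.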
inline HeapWord* OffsetTableContigSpace::allocate(size_t size) {
  HeapWord* res = ContiguousSpace::allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

// Because of the requirement of keeping "_offsets" up to date with the
// allocations, we serialize these with a lock.  Therefore, this is best
// used only for larger LAB allocations.
inline HeapWord* OffsetTableContigSpace::par_allocate(size_t size) {
  MutexLocker x(&_par_alloc_lock);
  // This ought to be just "allocate", because of the lock above, but
  // ContiguousSpace::allocate asserts that either the allocating thread
  // holds the heap lock or it is the VM thread and we're at a safepoint.
  // The best I (dld) could figure was to put a field in ContiguousSpace
  // meaning "locking at safepoint taken care of", and set/reset it here.
  // But this will do for now, especially in light of the comment above.
  // Perhaps in the future we can find a lock-free way to keep the offset
  // table coordinated.
  HeapWord* res = ContiguousSpace::par_allocate(size);
  if (res != NULL) {
    _offsets.alloc_block(res, size);
  }
  return res;
}

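// Block-start queries are answered by the block offset table that
// allocate() and par_allocate() keep up to date.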
inline HeapWord*
OffsetTableContigSpace::block_start_const(const void* p) const {
  return _offsets.block_start(p);
}

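// Blocks in a compactible space are objects, so the block size is just the
// object's size.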
size_t CompactibleSpace::obj_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

#if INCLUDE_SERIALGC

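// DeadSpacer tracks the amount of dead space that mark-sweep compaction is
// allowed to leave near the bottom of a space.  While this budget lasts,
// dead ranges are overwritten with filler objects that are marked as if live,
// so the compaction phases can leave them in place instead of sliding every
// subsequent object down.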
class DeadSpacer : StackObj {
  size_t _allowed_deadspace_words;
  bool _active;
  CompactibleSpace* _space;

public:
  DeadSpacer(CompactibleSpace* space) : _allowed_deadspace_words(0), _space(space) {
    size_t ratio = _space->allowed_dead_ratio();
    _active = ratio > 0;

    if (_active) {
      assert(!UseG1GC, "G1 should not be using dead space");

      // We allow some amount of garbage towards the bottom of the space, so
      // we don't start compacting before there is a significant gain to be made.
      // Occasionally, we want to ensure a full compaction, which is determined
      // by the MarkSweepAlwaysCompactCount parameter.
      if ((MarkSweep::total_invocations() % MarkSweepAlwaysCompactCount) != 0) {
        _allowed_deadspace_words = (space->capacity() * ratio / 100) / HeapWordSize;
      } else {
        _active = false;
      }
    }
  }


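  // If the dead range [dead_start, dead_end) still fits within the remaining
  // budget, overwrite it with a filler object, mark that filler so the
  // compaction phases treat it as live, and return true.  Otherwise the
  // budget is exhausted: deactivate and return false.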
  bool insert_deadspace(HeapWord* dead_start, HeapWord* dead_end) {
    if (!_active) {
      return false;
    }

    size_t dead_length = pointer_delta(dead_end, dead_start);
    if (_allowed_deadspace_words >= dead_length) {
      _allowed_deadspace_words -= dead_length;
      CollectedHeap::fill_with_object(dead_start, dead_length);
      oop obj = oop(dead_start);
      obj->set_mark_raw(obj->mark_raw()->set_marked());

      assert(dead_length == (size_t)obj->size(), "bad filler object size");
      log_develop_trace(gc, compaction)("Inserting object to dead space: " PTR_FORMAT ", " PTR_FORMAT ", " SIZE_FORMAT "b",
                                        p2i(dead_start), p2i(dead_end), dead_length * HeapWordSize);

      return true;
    } else {
      _active = false;
      return false;
    }
  }

};

template <class SpaceType>
inline void CompactibleSpace::scan_and_forward(SpaceType* space, CompactPoint* cp) {
  // Compute the new addresses for the live objects and store them in the
  // mark word.  Used by MarkSweep::mark_sweep_phase2().

  // We're sure to be here before any objects are compacted into this
  // space, so this is a good time to initialize this:
  space->set_compaction_top(space->bottom());

  if (cp->space == NULL) {
    assert(cp->gen != NULL, "need a generation");
    assert(cp->threshold == NULL, "just checking");
    assert(cp->gen->first_compaction_space() == space, "just checking");
    cp->space = cp->gen->first_compaction_space();
    cp->threshold = cp->space->initialize_threshold();
    cp->space->set_compaction_top(cp->space->bottom());
  }

  HeapWord* compact_top = cp->space->compaction_top(); // This is where we are currently compacting to.

  DeadSpacer dead_spacer(space);

  HeapWord* end_of_live = space->bottom();  // One byte beyond the last byte of the last live object.
  HeapWord* first_dead = NULL;              // The first dead object.

  const intx interval = PrefetchScanIntervalInBytes;

  HeapWord* cur_obj = space->bottom();
  HeapWord* scan_limit = space->scan_limit();

  while (cur_obj < scan_limit) {
    assert(!space->scanned_block_is_obj(cur_obj) ||
           oop(cur_obj)->mark_raw()->is_marked() || oop(cur_obj)->mark_raw()->is_unlocked() ||
           oop(cur_obj)->mark_raw()->has_bias_pattern(),
           "these are the only valid states during a mark sweep");
    if (space->scanned_block_is_obj(cur_obj) && oop(cur_obj)->is_gc_marked()) {
      // prefetch beyond cur_obj
      Prefetch::write(cur_obj, interval);
      size_t size = space->scanned_block_size(cur_obj);
      compact_top = cp->space->forward(oop(cur_obj), size, cp, compact_top);
      cur_obj += size;
      end_of_live = cur_obj;
    } else {
      // run over all the contiguous dead objects
      HeapWord* end = cur_obj;
      do {
        // prefetch beyond end
        Prefetch::write(end, interval);
        end += space->scanned_block_size(end);
      } while (end < scan_limit && (!space->scanned_block_is_obj(end) || !oop(end)->is_gc_marked()));

      // see if we might want to pretend this object is alive so that
      // we don't have to compact quite as often.
      if (cur_obj == compact_top && dead_spacer.insert_deadspace(cur_obj, end)) {
        oop obj = oop(cur_obj);
        compact_top = cp->space->forward(obj, obj->size(), cp, compact_top);
        end_of_live = end;
      } else {
        // otherwise, it really is a free region.

        // cur_obj is a pointer to a dead object. Use this dead memory to store a pointer to the next live object.
        *(HeapWord**)cur_obj = end;

        // see if this is the first dead region.
        if (first_dead == NULL) {
          first_dead = cur_obj;
        }
      }

      // move on to the next object
      cur_obj = end;
    }
  }

  assert(cur_obj == scan_limit, "just checking");
  space->_end_of_live = end_of_live;
  if (first_dead != NULL) {
    space->_first_dead = first_dead;
  } else {
    space->_first_dead = end_of_live;
  }

  // save the compaction_top of the compaction space.
  cp->space->set_compaction_top(compact_top);
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_adjust_pointers(SpaceType* space) {
  // adjust all the interior pointers to point at the new locations of objects
  // Used by MarkSweep::mark_sweep_phase3()

  HeapWord* cur_obj = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;  // Established by "scan_and_forward".
  HeapWord* const first_dead = space->_first_dead;    // Established by "scan_and_forward".

  assert(first_dead <= end_of_live, "Stands to reason, no?");

  const intx interval = PrefetchScanIntervalInBytes;

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    Prefetch::write(cur_obj, interval);
    if (cur_obj < first_dead || oop(cur_obj)->is_gc_marked()) {
      // cur_obj is alive
      // point all the oops to the new location
      size_t size = MarkSweep::adjust_pointers(oop(cur_obj));
      size = space->adjust_obj_size(size);
      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    } else {
      debug_only(prev_obj = cur_obj);
      // cur_obj is not a live object; instead it points at the next live object
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory, cur_obj: " PTR_FORMAT ", prev_obj: " PTR_FORMAT, p2i(cur_obj), p2i(prev_obj));
    }
  }

  assert(cur_obj == end_of_live, "just checking");
}

#ifdef ASSERT
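// Debug-only sanity check: when the space has a dense prefix that will not be
// moved (recognizable because the first object's mark word has already been
// reinitialized), verify that every object up to _first_dead is unmarked.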
template <class SpaceType>
inline void CompactibleSpace::verify_up_to_first_dead(SpaceType* space) {
  HeapWord* cur_obj = space->bottom();

  if (cur_obj < space->_end_of_live && space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // we have a chunk of the space which hasn't moved and we've reinitialized
    // the mark word during the previous pass, so we can't use is_gc_marked for
    // the traversal.
    HeapWord* prev_obj = NULL;

    while (cur_obj < space->_first_dead) {
      size_t size = space->obj_size(cur_obj);
      assert(!oop(cur_obj)->is_gc_marked(), "should be unmarked (special dense prefix handling)");
      prev_obj = cur_obj;
      cur_obj += size;
    }
  }
}
#endif

template <class SpaceType>
inline void CompactibleSpace::clear_empty_region(SpaceType* space) {
  // Let's remember if we were empty before we did the compaction.
  bool was_empty = space->used_region().is_empty();
  // Reset space after compaction is complete
  space->reset_after_compaction();
  // We do this clear, below, since it has overloaded meanings for some
  // space subtypes.  For example, OffsetTableContigSpaces that were
  // compacted into will have had their offset table thresholds updated
  // continuously, but those that weren't need to have their thresholds
  // re-initialized.  Also mangles unused area for debugging.
  if (space->used_region().is_empty()) {
    if (!was_empty) space->clear(SpaceDecorator::Mangle);
  } else {
    if (ZapUnusedHeapArea) space->mangle_unused_area();
  }
}

template <class SpaceType>
inline void CompactibleSpace::scan_and_compact(SpaceType* space) {
  // Copy all live objects to their new location
  // Used by MarkSweep::mark_sweep_phase4()

  verify_up_to_first_dead(space);

  HeapWord* const bottom = space->bottom();
  HeapWord* const end_of_live = space->_end_of_live;

  assert(space->_first_dead <= end_of_live, "Invariant. _first_dead: " PTR_FORMAT " <= end_of_live: " PTR_FORMAT, p2i(space->_first_dead), p2i(end_of_live));
  if (space->_first_dead == end_of_live && (bottom == end_of_live || !oop(bottom)->is_gc_marked())) {
    // Nothing to compact. The space is either empty or all live objects should be left in place.
    clear_empty_region(space);
    return;
  }

  const intx scan_interval = PrefetchScanIntervalInBytes;
  const intx copy_interval = PrefetchCopyIntervalInBytes;

  assert(bottom < end_of_live, "bottom: " PTR_FORMAT " should be < end_of_live: " PTR_FORMAT, p2i(bottom), p2i(end_of_live));
  HeapWord* cur_obj = bottom;
  if (space->_first_dead > cur_obj && !oop(cur_obj)->is_gc_marked()) {
    // All objects before _first_dead can be skipped. They should not be moved.
    // A pointer to the first live object is stored at the memory location for _first_dead.
    cur_obj = *(HeapWord**)(space->_first_dead);
  }

  debug_only(HeapWord* prev_obj = NULL);
  while (cur_obj < end_of_live) {
    if (!oop(cur_obj)->is_gc_marked()) {
      debug_only(prev_obj = cur_obj);
      // The first word of the dead object contains a pointer to the next live object or end of space.
      cur_obj = *(HeapWord**)cur_obj;
      assert(cur_obj > prev_obj, "we should be moving forward through memory");
    } else {
      // prefetch beyond cur_obj
      Prefetch::read(cur_obj, scan_interval);

      // size and destination
      size_t size = space->obj_size(cur_obj);
      HeapWord* compaction_top = (HeapWord*)oop(cur_obj)->forwardee();

      // prefetch beyond compaction_top
      Prefetch::write(compaction_top, copy_interval);

      // copy object and reinit its mark
      assert(cur_obj != compaction_top, "everything in this pass should be moving");
      Copy::aligned_conjoint_words(cur_obj, compaction_top, size);
      oop(compaction_top)->init_mark_raw();
      assert(oop(compaction_top)->klass() != NULL, "should have a class");

      debug_only(prev_obj = cur_obj);
      cur_obj += size;
    }
  }

  clear_empty_region(space);
}

#endif // INCLUDE_SERIALGC

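// Every scanned block in a contiguous space is an object, so its size is the
// object's size.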
size_t ContiguousSpace::scanned_block_size(const HeapWord* addr) const {
  return oop(addr)->size();
}

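// Apply "blk" to every object allocated since the last call to save_marks().
// The outer do/while re-reads top() because applying the closure may itself
// allocate into this space (for example when objects are promoted), and any
// such newly allocated objects must be scanned as well.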
template <typename OopClosureType>
void ContiguousSpace::oop_since_save_marks_iterate(OopClosureType* blk) {
  HeapWord* t;
  HeapWord* p = saved_mark_word();
  assert(p != NULL, "expected saved mark");

  const intx interval = PrefetchScanIntervalInBytes;
  do {
    t = top();
    while (p < t) {
      Prefetch::write(p, interval);
      debug_only(HeapWord* prev = p);
      oop m = oop(p);
      p += m->oop_iterate_size(blk);
    }
  } while (t < top());

  set_saved_mark_word(p);
}

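// Apply "blk" to the fields of every object that starts within "mr".  The
// region must begin at an object boundary; each object is then stepped over
// using its size.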
template <typename OopClosureType>
void ContiguousSpace::par_oop_iterate(MemRegion mr, OopClosureType* blk) {
  HeapWord* obj_addr = mr.start();
  HeapWord* limit = mr.end();
  while (obj_addr < limit) {
    assert(oopDesc::is_oop(oop(obj_addr)), "Should be an oop");
    obj_addr += oop(obj_addr)->oop_iterate_size(blk);
  }
}

#endif // SHARE_GC_SHARED_SPACE_INLINE_HPP