1 | /* |
2 | * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "classfile/systemDictionary.hpp" |
27 | #include "classfile/vmSymbols.hpp" |
28 | #include "gc/shared/blockOffsetTable.inline.hpp" |
29 | #include "gc/shared/collectedHeap.inline.hpp" |
30 | #include "gc/shared/genCollectedHeap.hpp" |
31 | #include "gc/shared/genOopClosures.inline.hpp" |
32 | #include "gc/shared/space.hpp" |
33 | #include "gc/shared/space.inline.hpp" |
34 | #include "gc/shared/spaceDecorator.hpp" |
35 | #include "memory/iterator.inline.hpp" |
36 | #include "memory/universe.hpp" |
37 | #include "oops/oop.inline.hpp" |
38 | #include "runtime/atomic.hpp" |
39 | #include "runtime/java.hpp" |
40 | #include "runtime/orderAccess.hpp" |
41 | #include "runtime/prefetch.inline.hpp" |
42 | #include "runtime/safepoint.hpp" |
43 | #include "utilities/align.hpp" |
44 | #include "utilities/copy.hpp" |
45 | #include "utilities/globalDefinitions.hpp" |
46 | #include "utilities/macros.hpp" |
47 | #if INCLUDE_SERIALGC |
48 | #include "gc/serial/defNewGeneration.hpp" |
49 | #endif |
50 | |
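// Given "top", the putative end of the dirty region, and "top_obj", the
// start of the block containing the last word of that region (NULL if the
// region runs to the end of the space), compute the address up to which
// the card scan must actually extend.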
51 | HeapWord* DirtyCardToOopClosure::get_actual_top(HeapWord* top, |
52 | HeapWord* top_obj) { |
53 | if (top_obj != NULL) { |
54 | if (_sp->block_is_obj(top_obj)) { |
55 | if (_precision == CardTable::ObjHeadPreciseArray) { |
56 | if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { |
          // An arrayOop starts on the dirty card; since we do exact
          // store checks for objArrays, we are done.
59 | } else { |
60 | // Otherwise, it is possible that the object starting on the dirty |
61 | // card spans the entire card, and that the store happened on a |
62 | // later card. Figure out where the object ends. |
63 | // Use the block_size() method of the space over which |
64 | // the iteration is being done. That space (e.g. CMS) may have |
65 | // specific requirements on object sizes which will |
66 | // be reflected in the block_size() method. |
67 | top = top_obj + oop(top_obj)->size(); |
68 | } |
69 | } |
70 | } else { |
71 | top = top_obj; |
72 | } |
73 | } else { |
    assert(top == _sp->end(), "only case where top_obj == NULL");
75 | } |
76 | return top; |
77 | } |
78 | |
79 | void DirtyCardToOopClosure::walk_mem_region(MemRegion mr, |
80 | HeapWord* bottom, |
81 | HeapWord* top) { |
82 | // 1. Blocks may or may not be objects. |
83 | // 2. Even when a block_is_obj(), it may not entirely |
84 | // occupy the block if the block quantum is larger than |
85 | // the object size. |
86 | // We can and should try to optimize by calling the non-MemRegion |
87 | // version of oop_iterate() for all but the extremal objects |
88 | // (for which we need to call the MemRegion version of |
  // oop_iterate()). To be done post-beta XXX
90 | for (; bottom < top; bottom += _sp->block_size(bottom)) { |
91 | // As in the case of contiguous space above, we'd like to |
92 | // just use the value returned by oop_iterate to increment the |
93 | // current pointer; unfortunately, that won't work in CMS because |
94 | // we'd need an interface change (it seems) to have the space |
95 | // "adjust the object size" (for instance pad it up to its |
    // block alignment or minimum block size restrictions). XXX
97 | if (_sp->block_is_obj(bottom) && |
98 | !_sp->obj_allocated_since_save_marks(oop(bottom))) { |
99 | oop(bottom)->oop_iterate(_cl, mr); |
100 | } |
101 | } |
102 | } |
103 | |
104 | // We get called with "mr" representing the dirty region |
105 | // that we want to process. Because of imprecise marking, |
106 | // we may need to extend the incoming "mr" to the right, |
107 | // and scan more. However, because we may already have |
108 | // scanned some of that extended region, we may need to |
109 | // trim its right-end back some so we do not scan what |
110 | // we (or another worker thread) may already have scanned |
// or may be planning to scan.
112 | void DirtyCardToOopClosure::do_MemRegion(MemRegion mr) { |
113 | |
114 | // Some collectors need to do special things whenever their dirty |
115 | // cards are processed. For instance, CMS must remember mutator updates |
116 | // (i.e. dirty cards) so as to re-scan mutated objects. |
117 | // Such work can be piggy-backed here on dirty card scanning, so as to make |
118 | // it slightly more efficient than doing a complete non-destructive pre-scan |
119 | // of the card table. |
120 | MemRegionClosure* pCl = _sp->preconsumptionDirtyCardClosure(); |
121 | if (pCl != NULL) { |
122 | pCl->do_MemRegion(mr); |
123 | } |
124 | |
125 | HeapWord* bottom = mr.start(); |
126 | HeapWord* last = mr.last(); |
127 | HeapWord* top = mr.end(); |
128 | HeapWord* bottom_obj; |
129 | HeapWord* top_obj; |
130 | |
131 | assert(_precision == CardTable::ObjHeadPreciseArray || |
132 | _precision == CardTable::Precise, |
133 | "Only ones we deal with for now." ); |
134 | |
135 | assert(_precision != CardTable::ObjHeadPreciseArray || |
136 | _last_bottom == NULL || top <= _last_bottom, |
137 | "Not decreasing" ); |
138 | NOT_PRODUCT(_last_bottom = mr.start()); |
139 | |
140 | bottom_obj = _sp->block_start(bottom); |
141 | top_obj = _sp->block_start(last); |
142 | |
  assert(bottom_obj <= bottom, "just checking");
  assert(top_obj <= top, "just checking");
145 | |
146 | // Given what we think is the top of the memory region and |
147 | // the start of the object at the top, get the actual |
148 | // value of the top. |
149 | top = get_actual_top(top, top_obj); |
150 | |
151 | // If the previous call did some part of this region, don't redo. |
152 | if (_precision == CardTable::ObjHeadPreciseArray && |
153 | _min_done != NULL && |
154 | _min_done < top) { |
155 | top = _min_done; |
156 | } |
157 | |
158 | // Top may have been reset, and in fact may be below bottom, |
159 | // e.g. the dirty card region is entirely in a now free object |
160 | // -- something that could happen with a concurrent sweeper. |
161 | bottom = MIN2(bottom, top); |
162 | MemRegion extended_mr = MemRegion(bottom, top); |
163 | assert(bottom <= top && |
164 | (_precision != CardTable::ObjHeadPreciseArray || |
165 | _min_done == NULL || |
166 | top <= _min_done), |
167 | "overlap!" ); |
168 | |
169 | // Walk the region if it is not empty; otherwise there is nothing to do. |
170 | if (!extended_mr.is_empty()) { |
171 | walk_mem_region(extended_mr, bottom_obj, top); |
172 | } |
173 | |
174 | _min_done = bottom; |
175 | } |
176 | |
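// Note that the "parallel" argument is not used by this generic
// implementation; it is part of the broader new_dcto_cl() interface.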
177 | DirtyCardToOopClosure* Space::new_dcto_cl(OopIterateClosure* cl, |
178 | CardTable::PrecisionStyle precision, |
179 | HeapWord* boundary, |
180 | bool parallel) { |
181 | return new DirtyCardToOopClosure(this, cl, precision, boundary); |
182 | } |
183 | |
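// The contiguous-space variant can consult top() directly: nothing at or
// beyond top() is allocated, so the scan limit never needs to extend
// past it.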
184 | HeapWord* ContiguousSpaceDCTOC::get_actual_top(HeapWord* top, |
185 | HeapWord* top_obj) { |
186 | if (top_obj != NULL && top_obj < (_sp->toContiguousSpace())->top()) { |
187 | if (_precision == CardTable::ObjHeadPreciseArray) { |
188 | if (oop(top_obj)->is_objArray() || oop(top_obj)->is_typeArray()) { |
      // An arrayOop starts on the dirty card; since we do exact
      // store checks for objArrays, we are done.
191 | } else { |
192 | // Otherwise, it is possible that the object starting on the dirty |
193 | // card spans the entire card, and that the store happened on a |
194 | // later card. Figure out where the object ends. |
195 | assert(_sp->block_size(top_obj) == (size_t) oop(top_obj)->size(), |
196 | "Block size and object size mismatch" ); |
197 | top = top_obj + oop(top_obj)->size(); |
198 | } |
199 | } |
200 | } else { |
201 | top = (_sp->toContiguousSpace())->top(); |
202 | } |
203 | return top; |
204 | } |
205 | |
206 | void FilteringDCTOC::walk_mem_region(MemRegion mr, |
207 | HeapWord* bottom, |
208 | HeapWord* top) { |
209 | // Note that this assumption won't hold if we have a concurrent |
210 | // collector in this space, which may have freed up objects after |
211 | // they were dirtied and before the stop-the-world GC that is |
212 | // examining cards here. |
  assert(bottom < top, "ought to be at least one obj on a dirty card.");
214 | |
215 | if (_boundary != NULL) { |
216 | // We have a boundary outside of which we don't want to look |
217 | // at objects, so create a filtering closure around the |
218 | // oop closure before walking the region. |
219 | FilteringClosure filter(_boundary, _cl); |
220 | walk_mem_region_with_cl(mr, bottom, top, &filter); |
221 | } else { |
222 | // No boundary, simply walk the heap with the oop closure. |
223 | walk_mem_region_with_cl(mr, bottom, top, _cl); |
224 | } |
225 | |
226 | } |
227 | |
228 | // We must replicate this so that the static type of "FilteringClosure" |
229 | // (see above) is apparent at the oop_iterate calls. |
230 | #define ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType) \ |
231 | void ContiguousSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr, \ |
232 | HeapWord* bottom, \ |
233 | HeapWord* top, \ |
234 | ClosureType* cl) { \ |
235 | bottom += oop(bottom)->oop_iterate_size(cl, mr); \ |
236 | if (bottom < top) { \ |
237 | HeapWord* next_obj = bottom + oop(bottom)->size(); \ |
238 | while (next_obj < top) { \ |
239 | /* Bottom lies entirely below top, so we can call the */ \ |
240 | /* non-memRegion version of oop_iterate below. */ \ |
241 | oop(bottom)->oop_iterate(cl); \ |
242 | bottom = next_obj; \ |
243 | next_obj = bottom + oop(bottom)->size(); \ |
244 | } \ |
245 | /* Last object. */ \ |
246 | oop(bottom)->oop_iterate(cl, mr); \ |
247 | } \ |
248 | } |
249 | |
250 | // (There are only two of these, rather than N, because the split is due |
251 | // only to the introduction of the FilteringClosure, a local part of the |
252 | // impl of this abstraction.) |
253 | ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure) |
254 | ContiguousSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure) |
255 | |
256 | DirtyCardToOopClosure* |
257 | ContiguousSpace::new_dcto_cl(OopIterateClosure* cl, |
258 | CardTable::PrecisionStyle precision, |
259 | HeapWord* boundary, |
260 | bool parallel) { |
261 | return new ContiguousSpaceDCTOC(this, cl, precision, boundary); |
262 | } |
263 | |
264 | void Space::initialize(MemRegion mr, |
265 | bool clear_space, |
266 | bool mangle_space) { |
267 | HeapWord* bottom = mr.start(); |
268 | HeapWord* end = mr.end(); |
269 | assert(Universe::on_page_boundary(bottom) && Universe::on_page_boundary(end), |
270 | "invalid space boundaries" ); |
271 | set_bottom(bottom); |
272 | set_end(end); |
273 | if (clear_space) clear(mangle_space); |
274 | } |
275 | |
276 | void Space::clear(bool mangle_space) { |
277 | if (ZapUnusedHeapArea && mangle_space) { |
278 | mangle_unused_area(); |
279 | } |
280 | } |
281 | |
282 | ContiguousSpace::ContiguousSpace(): CompactibleSpace(), _top(NULL), |
283 | _concurrent_iteration_safe_limit(NULL) { |
284 | _mangler = new GenSpaceMangler(this); |
285 | } |
286 | |
287 | ContiguousSpace::~ContiguousSpace() { |
288 | delete _mangler; |
289 | } |
290 | |
291 | void ContiguousSpace::initialize(MemRegion mr, |
292 | bool clear_space, |
293 | bool mangle_space) |
294 | { |
295 | CompactibleSpace::initialize(mr, clear_space, mangle_space); |
296 | set_concurrent_iteration_safe_limit(top()); |
297 | } |
298 | |
299 | void ContiguousSpace::clear(bool mangle_space) { |
300 | set_top(bottom()); |
301 | set_saved_mark(); |
302 | CompactibleSpace::clear(mangle_space); |
303 | } |
304 | |
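// In a contiguous space, everything at or above the allocation top is free.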
305 | bool ContiguousSpace::is_free_block(const HeapWord* p) const { |
306 | return p >= _top; |
307 | } |
308 | |
309 | void OffsetTableContigSpace::clear(bool mangle_space) { |
310 | ContiguousSpace::clear(mangle_space); |
311 | _offsets.initialize_threshold(); |
312 | } |
313 | |
314 | void OffsetTableContigSpace::set_bottom(HeapWord* new_bottom) { |
315 | Space::set_bottom(new_bottom); |
316 | _offsets.set_bottom(new_bottom); |
317 | } |
318 | |
319 | void OffsetTableContigSpace::set_end(HeapWord* new_end) { |
320 | // Space should not advertise an increase in size |
321 | // until after the underlying offset table has been enlarged. |
322 | _offsets.resize(pointer_delta(new_end, bottom())); |
323 | Space::set_end(new_end); |
324 | } |
325 | |
326 | #ifndef PRODUCT |
327 | |
328 | void ContiguousSpace::set_top_for_allocations(HeapWord* v) { |
329 | mangler()->set_top_for_allocations(v); |
330 | } |
331 | void ContiguousSpace::set_top_for_allocations() { |
332 | mangler()->set_top_for_allocations(top()); |
333 | } |
334 | void ContiguousSpace::check_mangled_unused_area(HeapWord* limit) { |
335 | mangler()->check_mangled_unused_area(limit); |
336 | } |
337 | |
338 | void ContiguousSpace::check_mangled_unused_area_complete() { |
339 | mangler()->check_mangled_unused_area_complete(); |
340 | } |
341 | |
// Mangle only the unused space that has not previously
343 | // been mangled and that has not been allocated since being |
344 | // mangled. |
345 | void ContiguousSpace::mangle_unused_area() { |
346 | mangler()->mangle_unused_area(); |
347 | } |
348 | void ContiguousSpace::mangle_unused_area_complete() { |
349 | mangler()->mangle_unused_area_complete(); |
350 | } |
351 | #endif // NOT_PRODUCT |
352 | |
353 | void CompactibleSpace::initialize(MemRegion mr, |
354 | bool clear_space, |
355 | bool mangle_space) { |
356 | Space::initialize(mr, clear_space, mangle_space); |
357 | set_compaction_top(bottom()); |
358 | _next_compaction_space = NULL; |
359 | } |
360 | |
361 | void CompactibleSpace::clear(bool mangle_space) { |
362 | Space::clear(mangle_space); |
363 | _compaction_top = bottom(); |
364 | } |
365 | |
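// Compute the post-compaction address of the live object "q" of the given
// size. If "q" does not fit in the current compaction space, advance "cp"
// along the chain of compaction spaces (wrapping around to the young
// generation's first compaction space if the chain is exhausted) until it
// does. The chosen destination is recorded in q's mark word as a
// forwarding pointer, except when the object does not move at all.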
366 | HeapWord* CompactibleSpace::forward(oop q, size_t size, |
367 | CompactPoint* cp, HeapWord* compact_top) { |
368 | // q is alive |
369 | // First check if we should switch compaction space |
  assert(this == cp->space, "'this' should be current compaction space.");
371 | size_t compaction_max_size = pointer_delta(end(), compact_top); |
372 | while (size > compaction_max_size) { |
373 | // switch to next compaction space |
374 | cp->space->set_compaction_top(compact_top); |
375 | cp->space = cp->space->next_compaction_space(); |
376 | if (cp->space == NULL) { |
377 | cp->gen = GenCollectedHeap::heap()->young_gen(); |
      assert(cp->gen != NULL, "compaction must succeed");
      cp->space = cp->gen->first_compaction_space();
      assert(cp->space != NULL, "generation must have a first compaction space");
381 | } |
382 | compact_top = cp->space->bottom(); |
383 | cp->space->set_compaction_top(compact_top); |
384 | cp->threshold = cp->space->initialize_threshold(); |
385 | compaction_max_size = pointer_delta(cp->space->end(), compact_top); |
386 | } |
387 | |
388 | // store the forwarding pointer into the mark word |
389 | if ((HeapWord*)q != compact_top) { |
390 | q->forward_to(oop(compact_top)); |
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
392 | } else { |
393 | // if the object isn't moving we can just set the mark to the default |
394 | // mark and handle it specially later on. |
395 | q->init_mark_raw(); |
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
397 | } |
398 | |
399 | compact_top += size; |
400 | |
401 | // we need to update the offset table so that the beginnings of objects can be |
402 | // found during scavenge. Note that we are updating the offset table based on |
403 | // where the object will be once the compaction phase finishes. |
404 | if (compact_top > cp->threshold) |
405 | cp->threshold = |
406 | cp->space->cross_threshold(compact_top - size, compact_top); |
407 | return compact_top; |
408 | } |
409 | |
410 | #if INCLUDE_SERIALGC |
411 | |
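// The three serial mark-compact phases below are implemented generically
// by the scan_and_* function templates (see space.inline.hpp),
// parameterized by the concrete space type.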
412 | void ContiguousSpace::prepare_for_compaction(CompactPoint* cp) { |
413 | scan_and_forward(this, cp); |
414 | } |
415 | |
416 | void CompactibleSpace::adjust_pointers() { |
  // First check if there is any work to do.
418 | if (used() == 0) { |
419 | return; // Nothing to do. |
420 | } |
421 | |
422 | scan_and_adjust_pointers(this); |
423 | } |
424 | |
425 | void CompactibleSpace::compact() { |
426 | scan_and_compact(this); |
427 | } |
428 | |
429 | #endif // INCLUDE_SERIALGC |
430 | |
431 | void Space::print_short() const { print_short_on(tty); } |
432 | |
433 | void Space::print_short_on(outputStream* st) const { |
434 | st->print(" space " SIZE_FORMAT "K, %3d%% used" , capacity() / K, |
435 | (int) ((double) used() * 100 / capacity())); |
436 | } |
437 | |
438 | void Space::print() const { print_on(tty); } |
439 | |
440 | void Space::print_on(outputStream* st) const { |
441 | print_short_on(st); |
442 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ")" , |
443 | p2i(bottom()), p2i(end())); |
444 | } |
445 | |
446 | void ContiguousSpace::print_on(outputStream* st) const { |
447 | print_short_on(st); |
448 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " INTPTR_FORMAT ")" , |
449 | p2i(bottom()), p2i(top()), p2i(end())); |
450 | } |
451 | |
452 | void OffsetTableContigSpace::print_on(outputStream* st) const { |
453 | print_short_on(st); |
454 | st->print_cr(" [" INTPTR_FORMAT ", " INTPTR_FORMAT ", " |
455 | INTPTR_FORMAT ", " INTPTR_FORMAT ")" , |
456 | p2i(bottom()), p2i(top()), p2i(_offsets.threshold()), p2i(end())); |
457 | } |
458 | |
459 | void ContiguousSpace::verify() const { |
460 | HeapWord* p = bottom(); |
461 | HeapWord* t = top(); |
462 | HeapWord* prev_p = NULL; |
463 | while (p < t) { |
464 | oopDesc::verify(oop(p)); |
465 | prev_p = p; |
466 | p += oop(p)->size(); |
467 | } |
  guarantee(p == top(), "end of last object must match end of space");
469 | if (top() != end()) { |
470 | guarantee(top() == block_start_const(end()-1) && |
471 | top() == block_start_const(top()), |
472 | "top should be start of unallocated block, if it exists" ); |
473 | } |
474 | } |
475 | |
476 | void Space::oop_iterate(OopIterateClosure* blk) { |
477 | ObjectToOopClosure blk2(blk); |
478 | object_iterate(&blk2); |
479 | } |
480 | |
481 | bool Space::obj_is_alive(const HeapWord* p) const { |
  assert(block_is_obj(p), "The address should point to an object");
483 | return true; |
484 | } |
485 | |
486 | void ContiguousSpace::oop_iterate(OopIterateClosure* blk) { |
487 | if (is_empty()) return; |
488 | HeapWord* obj_addr = bottom(); |
489 | HeapWord* t = top(); |
  // Could call object_iterate(), but this is easier.
491 | while (obj_addr < t) { |
492 | obj_addr += oop(obj_addr)->oop_iterate_size(blk); |
493 | } |
494 | } |
495 | |
496 | void ContiguousSpace::object_iterate(ObjectClosure* blk) { |
497 | if (is_empty()) return; |
498 | object_iterate_from(bottom(), blk); |
499 | } |
500 | |
// For a ContiguousSpace, object_iterate() and safe_object_iterate()
// are the same.
503 | void ContiguousSpace::safe_object_iterate(ObjectClosure* blk) { |
504 | object_iterate(blk); |
505 | } |
506 | |
507 | void ContiguousSpace::object_iterate_from(HeapWord* mark, ObjectClosure* blk) { |
508 | while (mark < top()) { |
509 | blk->do_object(oop(mark)); |
510 | mark += oop(mark)->size(); |
511 | } |
512 | } |
513 | |
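// Iterate over objects up to the concurrent-iteration safe limit with a
// closure that may refuse to process an object (signalled by returning a
// size of zero). Returns the address of the first refused object, or NULL
// if everything below the limit was processed.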
514 | HeapWord* |
515 | ContiguousSpace::object_iterate_careful(ObjectClosureCareful* blk) { |
  HeapWord* limit = concurrent_iteration_safe_limit();
  assert(limit <= top(), "sanity check");
518 | for (HeapWord* p = bottom(); p < limit;) { |
519 | size_t size = blk->do_object_careful(oop(p)); |
520 | if (size == 0) { |
521 | return p; // failed at p |
522 | } else { |
523 | p += size; |
524 | } |
525 | } |
526 | return NULL; // all done |
527 | } |
528 | |
529 | // Very general, slow implementation. |
530 | HeapWord* ContiguousSpace::block_start_const(const void* p) const { |
531 | assert(MemRegion(bottom(), end()).contains(p), |
532 | "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")" , |
533 | p2i(p), p2i(bottom()), p2i(end())); |
534 | if (p >= top()) { |
535 | return top(); |
536 | } else { |
537 | HeapWord* last = bottom(); |
538 | HeapWord* cur = last; |
539 | while (cur <= p) { |
540 | last = cur; |
541 | cur += oop(cur)->size(); |
542 | } |
    assert(oopDesc::is_oop(oop(last)), PTR_FORMAT " should be an object start", p2i(last));
544 | return last; |
545 | } |
546 | } |
547 | |
548 | size_t ContiguousSpace::block_size(const HeapWord* p) const { |
549 | assert(MemRegion(bottom(), end()).contains(p), |
550 | "p (" PTR_FORMAT ") not in space [" PTR_FORMAT ", " PTR_FORMAT ")" , |
551 | p2i(p), p2i(bottom()), p2i(end())); |
552 | HeapWord* current_top = top(); |
553 | assert(p <= current_top, |
554 | "p > current top - p: " PTR_FORMAT ", current top: " PTR_FORMAT, |
555 | p2i(p), p2i(current_top)); |
556 | assert(p == current_top || oopDesc::is_oop(oop(p)), |
557 | "p (" PTR_FORMAT ") is not a block start - " |
558 | "current_top: " PTR_FORMAT ", is_oop: %s" , |
559 | p2i(p), p2i(current_top), BOOL_TO_STR(oopDesc::is_oop(oop(p)))); |
560 | if (p < current_top) { |
561 | return oop(p)->size(); |
562 | } else { |
    assert(p == current_top, "just checking");
564 | return pointer_delta(end(), (HeapWord*) p); |
565 | } |
566 | } |
567 | |
568 | // This version requires locking. |
569 | inline HeapWord* ContiguousSpace::allocate_impl(size_t size) { |
570 | assert(Heap_lock->owned_by_self() || |
571 | (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), |
572 | "not locked" ); |
573 | HeapWord* obj = top(); |
574 | if (pointer_delta(end(), obj) >= size) { |
575 | HeapWord* new_top = obj + size; |
576 | set_top(new_top); |
    assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
578 | return obj; |
579 | } else { |
580 | return NULL; |
581 | } |
582 | } |
583 | |
584 | // This version is lock-free. |
585 | inline HeapWord* ContiguousSpace::par_allocate_impl(size_t size) { |
586 | do { |
587 | HeapWord* obj = top(); |
588 | if (pointer_delta(end(), obj) >= size) { |
589 | HeapWord* new_top = obj + size; |
590 | HeapWord* result = Atomic::cmpxchg(new_top, top_addr(), obj); |
      // The result can be one of two values:
      //   the old top value: the exchange succeeded
      //   otherwise: the new value of the top is returned
594 | if (result == obj) { |
        assert(is_aligned(obj) && is_aligned(new_top), "checking alignment");
596 | return obj; |
597 | } |
598 | } else { |
599 | return NULL; |
600 | } |
601 | } while (true); |
602 | } |
603 | |
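// Allocate "size" words with the object start aligned to
// SurvivorAlignmentInBytes, padding forward from the current top if
// necessary. Returns NULL if the aligned request does not fit between
// top() and end().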
604 | HeapWord* ContiguousSpace::allocate_aligned(size_t size) { |
  assert(Heap_lock->owned_by_self() || (SafepointSynchronize::is_at_safepoint() && Thread::current()->is_VM_thread()), "not locked");
606 | HeapWord* end_value = end(); |
607 | |
608 | HeapWord* obj = CollectedHeap::align_allocation_or_fail(top(), end_value, SurvivorAlignmentInBytes); |
609 | if (obj == NULL) { |
610 | return NULL; |
611 | } |
612 | |
613 | if (pointer_delta(end_value, obj) >= size) { |
614 | HeapWord* new_top = obj + size; |
615 | set_top(new_top); |
616 | assert(::is_aligned(obj, SurvivorAlignmentInBytes) && is_aligned(new_top), |
617 | "checking alignment" ); |
618 | return obj; |
619 | } else { |
620 | set_top(obj); |
621 | return NULL; |
622 | } |
623 | } |
624 | |
625 | // Requires locking. |
626 | HeapWord* ContiguousSpace::allocate(size_t size) { |
627 | return allocate_impl(size); |
628 | } |
629 | |
630 | // Lock-free. |
631 | HeapWord* ContiguousSpace::par_allocate(size_t size) { |
632 | return par_allocate_impl(size); |
633 | } |
634 | |
635 | void ContiguousSpace::allocate_temporary_filler(int factor) { |
  // Allocate a temporary type array so that roughly 1/factor of the
  // currently free space remains free.
  assert(factor >= 0, "just checking");
638 | size_t size = pointer_delta(end(), top()); |
639 | |
640 | // if space is full, return |
641 | if (size == 0) return; |
642 | |
643 | if (factor > 0) { |
644 | size -= size/factor; |
645 | } |
646 | size = align_object_size(size); |
647 | |
  const size_t array_header_size = typeArrayOopDesc::header_size(T_INT);
649 | if (size >= align_object_size(array_header_size)) { |
650 | size_t length = (size - array_header_size) * (HeapWordSize / sizeof(jint)); |
651 | // allocate uninitialized int array |
652 | typeArrayOop t = (typeArrayOop) allocate(size); |
    assert(t != NULL, "allocation should succeed");
654 | t->set_mark_raw(markOopDesc::prototype()); |
655 | t->set_klass(Universe::intArrayKlassObj()); |
656 | t->set_length((int)length); |
657 | } else { |
658 | assert(size == CollectedHeap::min_fill_size(), |
659 | "size for smallest fake object doesn't match" ); |
660 | instanceOop obj = (instanceOop) allocate(size); |
661 | obj->set_mark_raw(markOopDesc::prototype()); |
662 | obj->set_klass_gap(0); |
663 | obj->set_klass(SystemDictionary::Object_klass()); |
664 | } |
665 | } |
666 | |
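// Block-offset-table maintenance is delegated to the underlying
// BlockOffsetArray: cross_threshold() records a newly allocated block
// that crosses the current threshold and returns the next one.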
667 | HeapWord* OffsetTableContigSpace::initialize_threshold() { |
668 | return _offsets.initialize_threshold(); |
669 | } |
670 | |
671 | HeapWord* OffsetTableContigSpace::cross_threshold(HeapWord* start, HeapWord* end) { |
672 | _offsets.alloc_block(start, end); |
673 | return _offsets.threshold(); |
674 | } |
675 | |
676 | OffsetTableContigSpace::OffsetTableContigSpace(BlockOffsetSharedArray* sharedOffsetArray, |
677 | MemRegion mr) : |
678 | _offsets(sharedOffsetArray, mr), |
  _par_alloc_lock(Mutex::leaf, "OffsetTableContigSpace par alloc lock", true)
680 | { |
681 | _offsets.set_contig_space(this); |
682 | initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle); |
683 | } |
684 | |
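// Sampling intervals for verify() below: every object is verified
// (OBJ_SAMPLE_INTERVAL == 0), and the block offset table is cross-checked
// once every BLOCK_SAMPLE_INTERVAL blocks.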
685 | #define OBJ_SAMPLE_INTERVAL 0 |
686 | #define BLOCK_SAMPLE_INTERVAL 100 |
687 | |
688 | void OffsetTableContigSpace::verify() const { |
689 | HeapWord* p = bottom(); |
690 | HeapWord* prev_p = NULL; |
691 | int objs = 0; |
692 | int blocks = 0; |
693 | |
694 | if (VerifyObjectStartArray) { |
695 | _offsets.verify(); |
696 | } |
697 | |
698 | while (p < top()) { |
699 | size_t size = oop(p)->size(); |
700 | // For a sampling of objects in the space, find it using the |
701 | // block offset table. |
702 | if (blocks == BLOCK_SAMPLE_INTERVAL) { |
703 | guarantee(p == block_start_const(p + (size/2)), |
704 | "check offset computation" ); |
705 | blocks = 0; |
706 | } else { |
707 | blocks++; |
708 | } |
709 | |
710 | if (objs == OBJ_SAMPLE_INTERVAL) { |
711 | oopDesc::verify(oop(p)); |
712 | objs = 0; |
713 | } else { |
714 | objs++; |
715 | } |
716 | prev_p = p; |
717 | p += size; |
718 | } |
  guarantee(p == top(), "end of last object must match end of space");
720 | } |
721 | |
722 | |
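// The fraction of dead space tolerated during compaction of a
// TenuredSpace is controlled by the MarkSweepDeadRatio flag (a percentage).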
723 | size_t TenuredSpace::allowed_dead_ratio() const { |
724 | return MarkSweepDeadRatio; |
725 | } |
726 | |