1 | /* |
2 | * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "gc/parallel/gcTaskManager.hpp" |
27 | #include "gc/parallel/objectStartArray.inline.hpp" |
28 | #include "gc/parallel/parallelScavengeHeap.inline.hpp" |
29 | #include "gc/parallel/psCardTable.hpp" |
30 | #include "gc/parallel/psPromotionManager.inline.hpp" |
31 | #include "gc/parallel/psScavenge.inline.hpp" |
32 | #include "gc/parallel/psTasks.hpp" |
33 | #include "gc/parallel/psYoungGen.hpp" |
34 | #include "memory/iterator.inline.hpp" |
35 | #include "oops/access.inline.hpp" |
36 | #include "oops/oop.inline.hpp" |
37 | #include "runtime/prefetch.inline.hpp" |
38 | #include "utilities/align.hpp" |
39 | |
40 | // Checks an individual oop for missing precise marks. Mark |
41 | // may be either dirty or newgen. |
42 | class CheckForUnmarkedOops : public BasicOopIterateClosure { |
43 | private: |
44 | PSYoungGen* _young_gen; |
45 | PSCardTable* _card_table; |
46 | HeapWord* _unmarked_addr; |
47 | |
48 | protected: |
49 | template <class T> void do_oop_work(T* p) { |
50 | oop obj = RawAccess<>::oop_load(p); |
51 | if (_young_gen->is_in_reserved(obj) && |
52 | !_card_table->addr_is_marked_imprecise(p)) { |
53 | // Don't overwrite the first missing card mark |
54 | if (_unmarked_addr == NULL) { |
55 | _unmarked_addr = (HeapWord*)p; |
56 | } |
57 | } |
58 | } |
59 | |
60 | public: |
61 | CheckForUnmarkedOops(PSYoungGen* young_gen, PSCardTable* card_table) : |
62 | _young_gen(young_gen), _card_table(card_table), _unmarked_addr(NULL) { } |
63 | |
64 | virtual void do_oop(oop* p) { CheckForUnmarkedOops::do_oop_work(p); } |
65 | virtual void do_oop(narrowOop* p) { CheckForUnmarkedOops::do_oop_work(p); } |
66 | |
67 | bool has_unmarked_oop() { |
68 | return _unmarked_addr != NULL; |
69 | } |
70 | }; |
71 | |
72 | // Checks all objects for the existence of some type of mark, |
73 | // precise or imprecise, dirty or newgen. |
74 | class CheckForUnmarkedObjects : public ObjectClosure { |
75 | private: |
76 | PSYoungGen* _young_gen; |
77 | PSCardTable* _card_table; |
78 | |
79 | public: |
80 | CheckForUnmarkedObjects() { |
81 | ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); |
82 | _young_gen = heap->young_gen(); |
83 | _card_table = heap->card_table(); |
84 | } |
85 | |
86 | // Card marks are not precise. The current system can leave us with |
87 | // a mismatch of precise marks and beginning of object marks. This means |
88 | // we test for missing precise marks first. If any are found, we don't |
89 | // fail unless the object head is also unmarked. |
90 | virtual void do_object(oop obj) { |
91 | CheckForUnmarkedOops object_check(_young_gen, _card_table); |
92 | obj->oop_iterate(&object_check); |
93 | if (object_check.has_unmarked_oop()) { |
      guarantee(_card_table->addr_is_marked_imprecise(obj), "Found unmarked young_gen object");
95 | } |
96 | } |
97 | }; |
98 | |
99 | // Checks for precise marking of oops as newgen. |
100 | class CheckForPreciseMarks : public BasicOopIterateClosure { |
101 | private: |
102 | PSYoungGen* _young_gen; |
103 | PSCardTable* _card_table; |
104 | |
105 | protected: |
106 | template <class T> void do_oop_work(T* p) { |
107 | oop obj = RawAccess<IS_NOT_NULL>::oop_load(p); |
108 | if (_young_gen->is_in_reserved(obj)) { |
      assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
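      // set_card_newgen() leaves a verify-time mark (verify_card) that
      // verify_all_young_refs_precise_helper() later converts back to
      // youngergen_card.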
110 | _card_table->set_card_newgen(p); |
111 | } |
112 | } |
113 | |
114 | public: |
115 | CheckForPreciseMarks(PSYoungGen* young_gen, PSCardTable* card_table) : |
116 | _young_gen(young_gen), _card_table(card_table) { } |
117 | |
118 | virtual void do_oop(oop* p) { CheckForPreciseMarks::do_oop_work(p); } |
119 | virtual void do_oop(narrowOop* p) { CheckForPreciseMarks::do_oop_work(p); } |
120 | }; |
121 | |
122 | // We get passed the space_top value to prevent us from traversing into |
123 | // the old_gen promotion labs, which cannot be safely parsed. |
124 | |
125 | // Do not call this method if the space is empty. |
126 | // It is a waste to start tasks and get here only to |
127 | // do no work. If this method needs to be called |
128 | // when the space is empty, fix the calculation of |
129 | // end_card to allow sp_top == sp->bottom(). |
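//
// Each of the stripe_total worker threads processes every stripe_total'th
// chunk of ssize cards, so together the workers tile the card range of
// [sp->bottom(), space_top) exactly once; old-to-young references found on
// dirty cards are pushed onto the promotion manager's work stacks.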
130 | |
131 | void PSCardTable::scavenge_contents_parallel(ObjectStartArray* start_array, |
132 | MutableSpace* sp, |
133 | HeapWord* space_top, |
134 | PSPromotionManager* pm, |
135 | uint stripe_number, |
136 | uint stripe_total) { |
  int ssize = 128; // Naked constant! Work unit = 64k: 128 cards x 512 bytes per card (default card size).
138 | int dirty_card_count = 0; |
139 | |
140 | // It is a waste to get here if empty. |
  assert(sp->bottom() < sp->top(), "Should not be called if empty");
142 | oop* sp_top = (oop*)space_top; |
143 | CardValue* start_card = byte_for(sp->bottom()); |
144 | CardValue* end_card = byte_for(sp_top - 1) + 1; |
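  // Note that end_card is exclusive: byte_for(sp_top - 1) is the card
  // covering the last allocated word, so adding one yields the first card
  // past the used part of the space.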
145 | oop* last_scanned = NULL; // Prevent scanning objects more than once |
  // A slice is ssize * stripe_total cards wide, so that the stripe_total
  // stripes of ssize cards each exactly cover it.
149 | size_t slice_width = ssize * stripe_total; |
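  // Illustrative example: with ssize == 128 and stripe_total == 4,
  // slice_width == 512 cards, so worker 0 scans cards [0, 128),
  // [512, 640), ...; worker 1 scans [128, 256), [640, 768), ...; and so on.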
150 | for (CardValue* slice = start_card; slice < end_card; slice += slice_width) { |
151 | CardValue* worker_start_card = slice + stripe_number * ssize; |
152 | if (worker_start_card >= end_card) |
153 | return; // We're done. |
154 | |
155 | CardValue* worker_end_card = worker_start_card + ssize; |
156 | if (worker_end_card > end_card) |
157 | worker_end_card = end_card; |
158 | |
159 | // We do not want to scan objects more than once. In order to accomplish |
160 | // this, we assert that any object with an object head inside our 'slice' |
161 | // belongs to us. We may need to extend the range of scanned cards if the |
162 | // last object continues into the next 'slice'. |
163 | // |
164 | // Note! ending cards are exclusive! |
165 | HeapWord* slice_start = addr_for(worker_start_card); |
166 | HeapWord* slice_end = MIN2((HeapWord*) sp_top, addr_for(worker_end_card)); |
167 | |
168 | #ifdef ASSERT |
169 | if (GCWorkerDelayMillis > 0) { |
      // Delay workers 0 and 1 so that they proceed after the rest
      // of the work has been completed.
172 | if (stripe_number < 2) { |
173 | os::sleep(Thread::current(), GCWorkerDelayMillis, false); |
174 | } |
175 | } |
176 | #endif |
177 | |
    // If no objects start within this slice, skip it.
179 | if (!start_array->object_starts_in_range(slice_start, slice_end)) { |
180 | continue; |
181 | } |
    // Update our beginning addr: an object whose head lies in a previous
    // slice belongs to that slice's worker, so start scanning past it.
183 | HeapWord* first_object = start_array->object_start(slice_start); |
184 | debug_only(oop* first_object_within_slice = (oop*) first_object;) |
185 | if (first_object < slice_start) { |
186 | last_scanned = (oop*)(first_object + oop(first_object)->size()); |
187 | debug_only(first_object_within_slice = last_scanned;) |
188 | worker_start_card = byte_for(last_scanned); |
189 | } |
190 | |
191 | // Update the ending addr |
192 | if (slice_end < (HeapWord*)sp_top) { |
193 | // The subtraction is important! An object may start precisely at slice_end. |
194 | HeapWord* last_object = start_array->object_start(slice_end - 1); |
195 | slice_end = last_object + oop(last_object)->size(); |
196 | // worker_end_card is exclusive, so bump it one past the end of last_object's |
197 | // covered span. |
198 | worker_end_card = byte_for(slice_end) + 1; |
199 | |
200 | if (worker_end_card > end_card) |
201 | worker_end_card = end_card; |
202 | } |
203 | |
    assert(slice_end <= (HeapWord*)sp_top, "Last object in slice crosses space boundary");
    assert(is_valid_card_address(worker_start_card), "Invalid worker start card");
    assert(is_valid_card_address(worker_end_card), "Invalid worker end card");
    // Note that worker_start_card >= worker_end_card is legal, and happens when
    // an object spans an entire slice.
    assert(worker_start_card <= end_card, "worker start card beyond end card");
    assert(worker_end_card <= end_card, "worker end card beyond end card");
211 | |
212 | CardValue* current_card = worker_start_card; |
213 | while (current_card < worker_end_card) { |
214 | // Find an unclean card. |
215 | while (current_card < worker_end_card && card_is_clean(*current_card)) { |
216 | current_card++; |
217 | } |
218 | CardValue* first_unclean_card = current_card; |
219 | |
220 | // Find the end of a run of contiguous unclean cards |
221 | while (current_card < worker_end_card && !card_is_clean(*current_card)) { |
222 | while (current_card < worker_end_card && !card_is_clean(*current_card)) { |
223 | current_card++; |
224 | } |
225 | |
226 | if (current_card < worker_end_card) { |
227 | // Some objects may be large enough to span several cards. If such |
228 | // an object has more than one dirty card, separated by a clean card, |
229 | // we will attempt to scan it twice. The test against "last_scanned" |
230 | // prevents the redundant object scan, but it does not prevent newly |
231 | // marked cards from being cleaned. |
232 | HeapWord* last_object_in_dirty_region = start_array->object_start(addr_for(current_card)-1); |
233 | size_t size_of_last_object = oop(last_object_in_dirty_region)->size(); |
234 | HeapWord* end_of_last_object = last_object_in_dirty_region + size_of_last_object; |
235 | CardValue* ending_card_of_last_object = byte_for(end_of_last_object); |
          assert(ending_card_of_last_object <= worker_end_card, "ending_card_of_last_object is greater than worker_end_card");
237 | if (ending_card_of_last_object > current_card) { |
            // This means the object spans the next complete card.
            // Bump current_card to ending_card_of_last_object.
240 | current_card = ending_card_of_last_object; |
241 | } |
242 | } |
243 | } |
244 | CardValue* following_clean_card = current_card; |
245 | |
246 | if (first_unclean_card < worker_end_card) { |
247 | oop* p = (oop*) start_array->object_start(addr_for(first_unclean_card)); |
        assert((HeapWord*)p <= addr_for(first_unclean_card), "checking");
249 | // "p" should always be >= "last_scanned" because newly GC dirtied |
250 | // cards are no longer scanned again (see comment at end |
251 | // of loop on the increment of "current_card"). Test that |
252 | // hypothesis before removing this code. |
253 | // If this code is removed, deal with the first time through |
254 | // the loop when the last_scanned is the object starting in |
255 | // the previous slice. |
        assert((p >= last_scanned) ||
               (last_scanned == first_object_within_slice),
               "Should no longer be possible");
259 | if (p < last_scanned) { |
          // Avoid scanning an object more than once; this can happen
          // because the newgen cards set by GC may be a different set
          // than the originally dirty set.
263 | p = last_scanned; |
264 | } |
265 | oop* to = (oop*)addr_for(following_clean_card); |
266 | |
        // Test slice_end first! slice_end <= sp_top, so the tighter bound
        // wins; objects at or beyond slice_end belong to the next stripe's
        // worker.
268 | if ((HeapWord*)to > slice_end) { |
269 | to = (oop*)slice_end; |
270 | } else if (to > sp_top) { |
271 | to = sp_top; |
272 | } |
273 | |
        // We know which cards to scan, so clear them now; the boundary
        // cards of the worker range are deliberately left uncleared,
        // presumably because adjacent stripes may also examine them.
275 | if (first_unclean_card <= worker_start_card+1) |
276 | first_unclean_card = worker_start_card+1; |
277 | if (following_clean_card >= worker_end_card-1) |
278 | following_clean_card = worker_end_card-1; |
279 | |
280 | while (first_unclean_card < following_clean_card) { |
281 | *first_unclean_card++ = clean_card; |
282 | } |
283 | |
284 | const int interval = PrefetchScanIntervalInBytes; |
285 | // scan all objects in the range |
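        // The two scan loops below differ only in the Prefetch::write call;
        // the interval test is hoisted out of the loops, presumably to avoid
        // a per-object branch.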
286 | if (interval != 0) { |
287 | while (p < to) { |
288 | Prefetch::write(p, interval); |
289 | oop m = oop(p); |
290 | assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)); |
291 | pm->push_contents(m); |
292 | p += m->size(); |
293 | } |
294 | pm->drain_stacks_cond_depth(); |
295 | } else { |
296 | while (p < to) { |
297 | oop m = oop(p); |
298 | assert(oopDesc::is_oop_or_null(m), "Expected an oop or NULL for header field at " PTR_FORMAT, p2i(m)); |
299 | pm->push_contents(m); |
300 | p += m->size(); |
301 | } |
302 | pm->drain_stacks_cond_depth(); |
303 | } |
304 | last_scanned = p; |
305 | } |
306 | // "current_card" is still the "following_clean_card" or |
307 | // the current_card is >= the worker_end_card so the |
308 | // loop will not execute again. |
309 | assert((current_card == following_clean_card) || |
310 | (current_card >= worker_end_card), |
311 | "current_card should only be incremented if it still equals " |
312 | "following_clean_card" ); |
      // Increment current_card so that it is not processed again.
      // It may now be dirty because an old-to-young pointer was
      // found on it and updated. If it is now dirty, it cannot
      // be safely cleaned in the next iteration.
317 | current_card++; |
318 | } |
319 | } |
320 | } |
321 | |
322 | // This should be called before a scavenge. |
323 | void PSCardTable::verify_all_young_refs_imprecise() { |
324 | CheckForUnmarkedObjects check; |
325 | |
326 | ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); |
327 | PSOldGen* old_gen = heap->old_gen(); |
328 | |
329 | old_gen->object_iterate(&check); |
330 | } |
331 | |
332 | // This should be called immediately after a scavenge, before mutators resume. |
333 | void PSCardTable::verify_all_young_refs_precise() { |
334 | ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); |
335 | PSOldGen* old_gen = heap->old_gen(); |
336 | |
337 | CheckForPreciseMarks check(heap->young_gen(), this); |
338 | |
339 | old_gen->oop_iterate(&check); |
340 | |
341 | verify_all_young_refs_precise_helper(old_gen->object_space()->used_region()); |
342 | } |
343 | |
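// Walk the cards covering mr: after CheckForPreciseMarks has run, every card
// is expected to be either clean or verify_card; rewrite verify_card marks
// back to youngergen_card.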
344 | void PSCardTable::verify_all_young_refs_precise_helper(MemRegion mr) { |
345 | CardValue* bot = byte_for(mr.start()); |
346 | CardValue* top = byte_for(mr.end()); |
347 | while (bot <= top) { |
    assert(*bot == clean_card || *bot == verify_card, "Found unwanted or unknown card mark");
349 | if (*bot == verify_card) |
350 | *bot = youngergen_card; |
351 | bot++; |
352 | } |
353 | } |
354 | |
355 | bool PSCardTable::addr_is_marked_imprecise(void *addr) { |
356 | CardValue* p = byte_for(addr); |
357 | CardValue val = *p; |
358 | |
359 | if (card_is_dirty(val)) |
360 | return true; |
361 | |
362 | if (card_is_newgen(val)) |
363 | return true; |
364 | |
365 | if (card_is_clean(val)) |
366 | return false; |
367 | |
  assert(false, "Found unhandled card mark type");
369 | |
370 | return false; |
371 | } |
372 | |
373 | // Also includes verify_card |
374 | bool PSCardTable::addr_is_marked_precise(void *addr) { |
375 | CardValue* p = byte_for(addr); |
376 | CardValue val = *p; |
377 | |
378 | if (card_is_newgen(val)) |
379 | return true; |
380 | |
381 | if (card_is_verify(val)) |
382 | return true; |
383 | |
384 | if (card_is_clean(val)) |
385 | return false; |
386 | |
387 | if (card_is_dirty(val)) |
388 | return false; |
389 | |
  assert(false, "Found unhandled card mark type");
391 | |
392 | return false; |
393 | } |
394 | |
// Assumes that only the base or the end changes. This allows identification
// of the region that is being resized.
// CardTable::resize_covered_region() is used for the normal case where the
// covered regions are growing or shrinking at the high end.
// resize_covered_region_by_end() is analogous to
// CardTable::resize_covered_region() but for regions that grow or shrink
// at the low end.
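// Dispatch: a covered region whose start matches new_region is resized at
// the high end (resize_covered_region_by_start); one whose end matches is
// resized at the low end (resize_covered_region_by_end); otherwise
// new_region must be a brand-new covered region.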
402 | void PSCardTable::resize_covered_region(MemRegion new_region) { |
403 | for (int i = 0; i < _cur_covered_regions; i++) { |
404 | if (_covered[i].start() == new_region.start()) { |
405 | // Found a covered region with the same start as the |
406 | // new region. The region is growing or shrinking |
407 | // from the start of the region. |
408 | resize_covered_region_by_start(new_region); |
409 | return; |
410 | } |
411 | if (_covered[i].start() > new_region.start()) { |
412 | break; |
413 | } |
414 | } |
415 | |
416 | int changed_region = -1; |
417 | for (int j = 0; j < _cur_covered_regions; j++) { |
418 | if (_covered[j].end() == new_region.end()) { |
419 | changed_region = j; |
420 | // This is a case where the covered region is growing or shrinking |
421 | // at the start of the region. |
      assert(changed_region != -1, "Don't expect to add a covered region");
      assert(_covered[changed_region].byte_size() != new_region.byte_size(),
             "The sizes should be different here");
425 | resize_covered_region_by_end(changed_region, new_region); |
426 | return; |
427 | } |
428 | } |
429 | // This should only be a new covered region (where no existing |
430 | // covered region matches at the start or the end). |
431 | assert(_cur_covered_regions < _max_covered_regions, |
432 | "An existing region should have been found" ); |
433 | resize_covered_region_by_start(new_region); |
434 | } |
435 | |
436 | void PSCardTable::resize_covered_region_by_start(MemRegion new_region) { |
437 | CardTable::resize_covered_region(new_region); |
438 | debug_only(verify_guard();) |
439 | } |
440 | |
441 | void PSCardTable::resize_covered_region_by_end(int changed_region, |
442 | MemRegion new_region) { |
443 | assert(SafepointSynchronize::is_at_safepoint(), |
444 | "Only expect an expansion at the low end at a GC" ); |
445 | debug_only(verify_guard();) |
446 | #ifdef ASSERT |
447 | for (int k = 0; k < _cur_covered_regions; k++) { |
448 | if (_covered[k].end() == new_region.end()) { |
      assert(changed_region == k, "Changed region is incorrect");
450 | break; |
451 | } |
452 | } |
453 | #endif |
454 | |
455 | // Commit new or uncommit old pages, if necessary. |
456 | if (resize_commit_uncommit(changed_region, new_region)) { |
457 | // Set the new start of the committed region |
458 | resize_update_committed_table(changed_region, new_region); |
459 | } |
460 | |
461 | // Update card table entries |
462 | resize_update_card_table_entries(changed_region, new_region); |
463 | |
464 | // Update the covered region |
465 | resize_update_covered_table(changed_region, new_region); |
466 | |
467 | int ind = changed_region; |
468 | log_trace(gc, barrier)("CardTable::resize_covered_region: " ); |
469 | log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT, |
470 | ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last())); |
471 | log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT, |
472 | ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last())); |
473 | log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT, |
474 | p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last()))); |
475 | log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT, |
476 | p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last()))); |
477 | |
478 | debug_only(verify_guard();) |
479 | } |
480 | |
481 | bool PSCardTable::resize_commit_uncommit(int changed_region, |
482 | MemRegion new_region) { |
483 | bool result = false; |
484 | // Commit new or uncommit old pages, if necessary. |
485 | MemRegion cur_committed = _committed[changed_region]; |
  assert(_covered[changed_region].end() == new_region.end(),
         "The ends of the regions are expected to match");
  // Extend the start of this _committed region to
  // cover the start of any previous _committed region.
  // This forms overlapping regions, but never interior regions.
491 | HeapWord* min_prev_start = lowest_prev_committed_start(changed_region); |
492 | if (min_prev_start < cur_committed.start()) { |
    // Only really need to set the start of "cur_committed" to
    // the new start (min_prev_start), but the assertion checking code
    // below uses cur_committed.end(), so keep it correct.
496 | MemRegion new_committed = |
497 | MemRegion(min_prev_start, cur_committed.end()); |
498 | cur_committed = new_committed; |
499 | } |
500 | #ifdef ASSERT |
501 | ParallelScavengeHeap* heap = ParallelScavengeHeap::heap(); |
502 | assert(cur_committed.start() == align_up(cur_committed.start(), os::vm_page_size()), |
503 | "Starts should have proper alignment" ); |
504 | #endif |
505 | |
506 | CardValue* new_start = byte_for(new_region.start()); |
507 | // Round down because this is for the start address |
508 | HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size()); |
509 | // The guard page is always committed and should not be committed over. |
510 | // This method is used in cases where the generation is growing toward |
511 | // lower addresses but the guard region is still at the end of the |
512 | // card table. That still makes sense when looking for writes |
513 | // off the end of the card table. |
514 | if (new_start_aligned < cur_committed.start()) { |
515 | // Expand the committed region |
516 | // |
517 | // Case A |
518 | // |+ guard +| |
519 | // |+ cur committed +++++++++| |
520 | // |+ new committed +++++++++++++++++| |
521 | // |
522 | // Case B |
523 | // |+ guard +| |
524 | // |+ cur committed +| |
525 | // |+ new committed +++++++| |
526 | // |
    // These are not expected because the cur committed region and the
    // new committed region are calculated from the same end of the
    // covered region.
530 | // Case C |
531 | // |+ guard +| |
532 | // |+ cur committed +| |
533 | // |+ new committed +++++++++++++++++| |
534 | // Case D |
535 | // |+ guard +| |
536 | // |+ cur committed +++++++++++| |
537 | // |+ new committed +++++++| |
538 | |
539 | HeapWord* new_end_for_commit = |
540 | MIN2(cur_committed.end(), _guard_region.start()); |
    if (new_start_aligned < new_end_for_commit) {
542 | MemRegion new_committed = |
543 | MemRegion(new_start_aligned, new_end_for_commit); |
544 | os::commit_memory_or_exit((char*)new_committed.start(), |
545 | new_committed.byte_size(), !ExecMem, |
546 | "card table expansion" ); |
547 | } |
548 | result = true; |
549 | } else if (new_start_aligned > cur_committed.start()) { |
550 | // Shrink the committed region |
#if 0 // Uncommitting space is currently unsafe because of the interactions
      // of growing and shrinking regions: one region A can uncommit space
      // that it owns but which may be in use by another region B, which
      // has not committed the space because it was already committed by
      // region A.
556 | MemRegion uncommit_region = committed_unique_to_self(changed_region, |
557 | MemRegion(cur_committed.start(), new_start_aligned)); |
558 | if (!uncommit_region.is_empty()) { |
559 | if (!os::uncommit_memory((char*)uncommit_region.start(), |
560 | uncommit_region.byte_size())) { |
        // If the uncommit fails, ignore it. Let the
        // committed table resizing go ahead even though the committed
        // table will overstate the committed space.
564 | } |
565 | } |
566 | #else |
    assert(!result, "Should be false with current workaround");
568 | #endif |
569 | } |
  assert(_committed[changed_region].end() == cur_committed.end(),
         "end should not change");
572 | return result; |
573 | } |
574 | |
575 | void PSCardTable::resize_update_committed_table(int changed_region, |
576 | MemRegion new_region) { |
577 | |
578 | CardValue* new_start = byte_for(new_region.start()); |
579 | // Set the new start of the committed region |
580 | HeapWord* new_start_aligned = align_down((HeapWord*)new_start, os::vm_page_size()); |
581 | MemRegion new_committed = MemRegion(new_start_aligned, |
582 | _committed[changed_region].end()); |
583 | _committed[changed_region] = new_committed; |
584 | _committed[changed_region].set_start(new_start_aligned); |
585 | } |
586 | |
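// When the covered region grows toward lower addresses, the cards between
// the new (lower) start and the original covered start have not been used
// yet; initialize them to clean_card.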
587 | void PSCardTable::resize_update_card_table_entries(int changed_region, |
588 | MemRegion new_region) { |
589 | debug_only(verify_guard();) |
590 | MemRegion original_covered = _covered[changed_region]; |
591 | // Initialize the card entries. Only consider the |
592 | // region covered by the card table (_whole_heap) |
593 | CardValue* entry; |
594 | if (new_region.start() < _whole_heap.start()) { |
595 | entry = byte_for(_whole_heap.start()); |
596 | } else { |
597 | entry = byte_for(new_region.start()); |
598 | } |
599 | CardValue* end = byte_for(original_covered.start()); |
  // If _whole_heap starts at the original covered region's start,
  // this loop will not execute.
602 | while (entry < end) { *entry++ = clean_card; } |
603 | } |
604 | |
605 | void PSCardTable::resize_update_covered_table(int changed_region, |
606 | MemRegion new_region) { |
607 | // Update the covered region |
608 | _covered[changed_region].set_start(new_region.start()); |
609 | _covered[changed_region].set_word_size(new_region.word_size()); |
610 | |
  // Reorder the regions; at most one should be out of order.
613 | for (int i = _cur_covered_regions-1 ; i > 0; i--) { |
614 | if (_covered[i].start() < _covered[i-1].start()) { |
615 | MemRegion covered_mr = _covered[i-1]; |
616 | _covered[i-1] = _covered[i]; |
617 | _covered[i] = covered_mr; |
618 | MemRegion committed_mr = _committed[i-1]; |
619 | _committed[i-1] = _committed[i]; |
620 | _committed[i] = committed_mr; |
621 | break; |
622 | } |
623 | } |
624 | #ifdef ASSERT |
625 | for (int m = 0; m < _cur_covered_regions-1; m++) { |
    assert(_covered[m].start() <= _covered[m+1].start(),
           "Covered regions out of order");
    assert(_committed[m].start() <= _committed[m+1].start(),
           "Committed regions out of order");
630 | } |
631 | #endif |
632 | } |
633 | |
634 | // Returns the start of any committed region that is lower than |
635 | // the target committed region (index ind) and that intersects the |
636 | // target region. If none, return start of target region. |
637 | // |
638 | // ------------- |
639 | // | | |
640 | // ------------- |
641 | // ------------ |
642 | // | target | |
643 | // ------------ |
644 | // ------------- |
645 | // | | |
646 | // ------------- |
647 | // ^ returns this |
648 | // |
649 | // ------------- |
650 | // | | |
651 | // ------------- |
652 | // ------------ |
653 | // | target | |
654 | // ------------ |
655 | // ------------- |
656 | // | | |
657 | // ------------- |
658 | // ^ returns this |
659 | |
660 | HeapWord* PSCardTable::lowest_prev_committed_start(int ind) const { |
  assert(_cur_covered_regions >= 0, "Expecting at least one region");
662 | HeapWord* min_start = _committed[ind].start(); |
663 | for (int j = 0; j < ind; j++) { |
664 | HeapWord* this_start = _committed[j].start(); |
665 | if ((this_start < min_start) && |
666 | !(_committed[j].intersection(_committed[ind])).is_empty()) { |
667 | min_start = this_start; |
668 | } |
669 | } |
670 | return min_start; |
671 | } |
672 | |
673 | bool PSCardTable::is_in_young(oop obj) const { |
674 | return ParallelScavengeHeap::heap()->is_in_young(obj); |
675 | } |
676 | |