/*
 * Copyright (c) 2000, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/cardTable.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/space.inline.hpp"
#include "logging/log.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/java.hpp"
#include "runtime/os.hpp"
#include "services/memTracker.hpp"
#include "utilities/align.hpp"

size_t CardTable::compute_byte_map_size() {
  assert(_guard_index == cards_required(_whole_heap.word_size()) - 1,
         "uninitialized, check declaration order");
  assert(_page_size != 0, "uninitialized, check declaration order");
  const size_t granularity = os::vm_allocation_granularity();
  return align_up(_guard_index + 1, MAX2(_page_size, granularity));
}
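
// A worked example of the computation above (a sketch, assuming 512-byte
// cards, a 4K page size and allocation granularity, and that
// cards_required() adds one guard card): covering a 1 GB heap needs
// 2^30 / 512 = 2,097,152 cards plus the guard card, so _guard_index is
// 2,097,152 and the map size is align_up(2,097,153, 4096) = 2,101,248
// bytes, i.e. one page beyond the 2 MB of ordinary card entries.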

CardTable::CardTable(MemRegion whole_heap, bool conc_scan) :
  _scanned_concurrently(conc_scan),
  _whole_heap(whole_heap),
  _guard_index(0),
  _last_valid_index(0),
  _page_size(os::vm_page_size()),
  _byte_map_size(0),
  _byte_map(NULL),
  _byte_map_base(NULL),
  _cur_covered_regions(0),
  _covered(NULL),
  _committed(NULL),
  _guard_region()
{
  assert((uintptr_t(_whole_heap.start()) & (card_size - 1)) == 0, "heap must start at card boundary");
  assert((uintptr_t(_whole_heap.end()) & (card_size - 1)) == 0, "heap must end at card boundary");

  assert(card_size <= 512, "card_size must be at most 512"); // why?

  _covered = new MemRegion[_max_covered_regions];
  if (_covered == NULL) {
    vm_exit_during_initialization("Could not allocate card table covered region set.");
  }
}

CardTable::~CardTable() {
  if (_covered) {
    delete[] _covered;
    _covered = NULL;
  }
  if (_committed) {
    delete[] _committed;
    _committed = NULL;
  }
}

void CardTable::initialize() {
  _guard_index = cards_required(_whole_heap.word_size()) - 1;
  _last_valid_index = _guard_index - 1;

  _byte_map_size = compute_byte_map_size();

  HeapWord* low_bound  = _whole_heap.start();
  HeapWord* high_bound = _whole_heap.end();

  _cur_covered_regions = 0;
  _committed = new MemRegion[_max_covered_regions];
  if (_committed == NULL) {
    vm_exit_during_initialization("Could not allocate card table committed region set.");
  }

  const size_t rs_align = _page_size == (size_t) os::vm_page_size() ? 0 :
    MAX2(_page_size, (size_t) os::vm_allocation_granularity());
  ReservedSpace heap_rs(_byte_map_size, rs_align, false);

  MemTracker::record_virtual_memory_type((address)heap_rs.base(), mtGC);

  os::trace_page_sizes("Card Table", _guard_index + 1, _guard_index + 1,
                       _page_size, heap_rs.base(), heap_rs.size());
  if (!heap_rs.is_reserved()) {
    vm_exit_during_initialization("Could not reserve enough space for the "
                                  "card marking array");
  }

  // The assembler store_check code will do an unsigned shift of the oop,
  // then add it to _byte_map_base, i.e.
  //
  //   _byte_map = _byte_map_base + (uintptr_t(low_bound) >> card_shift)
  _byte_map = (CardValue*) heap_rs.base();
  _byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
  assert(byte_for(low_bound) == &_byte_map[0], "Checking start of map");
  assert(byte_for(high_bound-1) <= &_byte_map[_last_valid_index], "Checking end of map");
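
  // A worked example of the mapping above (a sketch; the addresses are
  // hypothetical and card_shift is assumed to be 9, matching 512-byte
  // cards): with low_bound == 0x0000000080000000 and
  // _byte_map == 0x00007f5400000000,
  //
  //   _byte_map_base = 0x00007f5400000000 - (0x80000000 >> 9)
  //                  = 0x00007f5400000000 - 0x400000
  //                  = 0x00007f53ffc00000
  //
  // so byte_for(p) == _byte_map_base + (uintptr_t(p) >> 9) lands inside
  // [_byte_map, _byte_map + _byte_map_size) for every p in the covered heap.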

  CardValue* guard_card = &_byte_map[_guard_index];
  HeapWord* guard_page = align_down((HeapWord*)guard_card, _page_size);
  _guard_region = MemRegion(guard_page, _page_size);
  os::commit_memory_or_exit((char*)guard_page, _page_size, _page_size,
                            !ExecMem, "card table last card");
  *guard_card = last_card;

  log_trace(gc, barrier)("CardTable::CardTable: ");
  log_trace(gc, barrier)(" &_byte_map[0]: " INTPTR_FORMAT " &_byte_map[_last_valid_index]: " INTPTR_FORMAT,
                         p2i(&_byte_map[0]), p2i(&_byte_map[_last_valid_index]));
  log_trace(gc, barrier)(" _byte_map_base: " INTPTR_FORMAT, p2i(_byte_map_base));
}

int CardTable::find_covering_region_by_base(HeapWord* base) {
  int i;
  for (i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].start() == base) return i;
    if (_covered[i].start() > base) break;
  }
  // If we didn't find it, create a new one.
  assert(_cur_covered_regions < _max_covered_regions,
         "too many covered regions");
  // Move the ones above up, to maintain sorted order.
  for (int j = _cur_covered_regions; j > i; j--) {
    _covered[j] = _covered[j-1];
    _committed[j] = _committed[j-1];
  }
  int res = i;
  _cur_covered_regions++;
  _covered[res].set_start(base);
  _covered[res].set_word_size(0);
  CardValue* ct_start = byte_for(base);
  HeapWord* ct_start_aligned = align_down((HeapWord*)ct_start, _page_size);
  _committed[res].set_start(ct_start_aligned);
  _committed[res].set_word_size(0);
  return res;
}
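
// For illustration (hypothetical bases): if _covered currently holds
// regions based at 0x1000 and 0x3000 and this is called with
// base == 0x2000, the search loop stops at i == 1, the 0x3000 entry (and
// its parallel _committed entry) is shifted up one slot, and a fresh
// zero-length region based at 0x2000 is recorded at index 1, keeping both
// arrays sorted by start address.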

int CardTable::find_covering_region_containing(HeapWord* addr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    if (_covered[i].contains(addr)) {
      return i;
    }
  }
  assert(0, "address outside of heap?");
  return -1;
}

HeapWord* CardTable::largest_prev_committed_end(int ind) const {
  HeapWord* max_end = NULL;
  for (int j = 0; j < ind; j++) {
    HeapWord* this_end = _committed[j].end();
    if (this_end > max_end) max_end = this_end;
  }
  return max_end;
}

MemRegion CardTable::committed_unique_to_self(int self, MemRegion mr) const {
  MemRegion result = mr;
  for (int r = 0; r < _cur_covered_regions; r += 1) {
    if (r != self) {
      result = result.minus(_committed[r]);
    }
  }
  // Never include the guard page.
  result = result.minus(_guard_region);
  return result;
}
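
// For illustration (a sketch, on the assumption that MemRegion::minus()
// returns the part of the receiver lying outside its argument): if this
// region's committed card range is [0x1000, 0x5000) and a neighboring
// region's is [0x4000, 0x6000), the result is trimmed to [0x1000, 0x4000);
// the overlap, like the guard page, stays committed because another region
// still uses it.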

void CardTable::resize_covered_region(MemRegion new_region) {
  // We don't change the start of a region, only the end.
  assert(_whole_heap.contains(new_region),
         "attempt to cover area not in reserved area");
  debug_only(verify_guard();)
  // collided is true if the expansion would push into another committed region
  debug_only(bool collided = false;)
  int const ind = find_covering_region_by_base(new_region.start());
  MemRegion const old_region = _covered[ind];
  assert(old_region.start() == new_region.start(), "region start should not change");
  if (new_region.word_size() != old_region.word_size()) {
    // Commit new or uncommit old pages, if necessary.
    MemRegion cur_committed = _committed[ind];
    // Extend the end of this _committed region
    // to cover the end of any lower _committed regions.
    // This forms overlapping regions, but never interior regions.
    HeapWord* const max_prev_end = largest_prev_committed_end(ind);
    if (max_prev_end > cur_committed.end()) {
      cur_committed.set_end(max_prev_end);
    }
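    // For example (hypothetical addresses): if an earlier region's
    // committed cards end at 0x6000 while ours end at 0x5000, treat our
    // committed end as 0x6000 here so the arithmetic below never
    // uncommits a card-table page the earlier region still relies on.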
    // Align the end up to a page size (starts are already aligned).
    HeapWord* new_end = (HeapWord*) byte_after(new_region.last());
    HeapWord* new_end_aligned = align_up(new_end, _page_size);
    assert(new_end_aligned >= new_end, "align up, but less");
    // Check the other regions (excludes "ind") to ensure that
    // the new_end_aligned does not intrude onto the committed
    // space of another region.
    int ri = 0;
    for (ri = ind + 1; ri < _cur_covered_regions; ri++) {
      if (new_end_aligned > _committed[ri].start()) {
        assert(new_end_aligned <= _committed[ri].end(),
               "An earlier committed region can't cover a later committed region");
        // Any region containing the new end
        // should start at or beyond the region found (ind)
        // for the new end (committed regions are not expected to
        // be proper subsets of other committed regions).
        assert(_committed[ri].start() >= _committed[ind].start(),
               "New end of committed region is inconsistent");
        new_end_aligned = _committed[ri].start();
        // new_end_aligned can be equal to the start of its
        // committed region (i.e., of "ind") if a second
        // region following "ind" also starts at the same location
        // as "ind".
        assert(new_end_aligned >= _committed[ind].start(),
               "New end of committed region is before start");
        debug_only(collided = true;)
        // Should only collide with 1 region
        break;
      }
    }
#ifdef ASSERT
    for (++ri; ri < _cur_covered_regions; ri++) {
      assert(!_committed[ri].contains(new_end_aligned),
             "New end of committed region is in a second committed region");
    }
#endif
    // The guard page is always committed and should not be committed over.
    // "guarded" is used in the assertion checking below to record that the
    // would-be end of the new committed region would have run into the
    // guard page.
    HeapWord* new_end_for_commit = new_end_aligned;

    DEBUG_ONLY(bool guarded = false;)
    if (new_end_for_commit > _guard_region.start()) {
      new_end_for_commit = _guard_region.start();
      DEBUG_ONLY(guarded = true;)
    }

    if (new_end_for_commit > cur_committed.end()) {
      // Must commit new pages.
      MemRegion const new_committed =
        MemRegion(cur_committed.end(), new_end_for_commit);

      assert(!new_committed.is_empty(), "Region should not be empty here");
      os::commit_memory_or_exit((char*)new_committed.start(),
                                new_committed.byte_size(), _page_size,
                                !ExecMem, "card table expansion");
    // Use new_end_aligned (as opposed to new_end_for_commit) because
    // the cur_committed region may include the guard region.
    } else if (new_end_aligned < cur_committed.end()) {
      // Must uncommit pages.
      MemRegion const uncommit_region =
        committed_unique_to_self(ind, MemRegion(new_end_aligned,
                                                cur_committed.end()));
      if (!uncommit_region.is_empty()) {
        // It is not safe to uncommit cards if the boundary between
        // the generations is moving.  A shrink can uncommit cards
        // owned by generation A but being used by generation B.
        if (!UseAdaptiveGCBoundary) {
          if (!os::uncommit_memory((char*)uncommit_region.start(),
                                   uncommit_region.byte_size())) {
            assert(false, "Card table contraction failed");
            // The call failed so don't change the end of the
            // committed region.  This is better than taking the
            // VM down.
            new_end_aligned = _committed[ind].end();
          }
        } else {
          new_end_aligned = _committed[ind].end();
        }
      }
    }
    // In any case, we can reset the end of the current committed entry.
    _committed[ind].set_end(new_end_aligned);

#ifdef ASSERT
    // Check that the last card in the new region is committed according
    // to the tables.
    bool covered = false;
    for (int cr = 0; cr < _cur_covered_regions; cr++) {
      if (_committed[cr].contains(new_end - 1)) {
        covered = true;
        break;
      }
    }
    assert(covered, "Card for end of new region not committed");
#endif

    // The default of 0 is not necessarily clean cards.
    CardValue* entry;
    if (old_region.last() < _whole_heap.start()) {
      entry = byte_for(_whole_heap.start());
    } else {
      entry = byte_after(old_region.last());
    }
    assert(index_for(new_region.last()) < _guard_index,
           "The guard card will be overwritten");
    // The commented-out line below would clean only the newly expanded
    // region, not the page-aligned region that was actually committed:
    // CardValue* const end = byte_after(new_region.last());
    CardValue* const end = (CardValue*) new_end_for_commit;
    assert((end >= byte_after(new_region.last())) || collided || guarded,
           "Expect to be beyond new region unless impacting another region");
    // Do nothing if we resized downward.
#ifdef ASSERT
    for (int ri = 0; ri < _cur_covered_regions; ri++) {
      if (ri != ind) {
        // The end of the new committed region should not
        // be in any existing region unless it matches
        // the start of the next region.
        assert(!_committed[ri].contains(end) ||
               (_committed[ri].start() == (HeapWord*) end),
               "Overlapping committed regions");
      }
    }
#endif
    if (entry < end) {
      memset(entry, clean_card, pointer_delta(end, entry, sizeof(CardValue)));
    }
  }
  // In any case, the covered size changes.
  _covered[ind].set_word_size(new_region.word_size());

  log_trace(gc, barrier)("CardTable::resize_covered_region: ");
  log_trace(gc, barrier)(" _covered[%d].start(): " INTPTR_FORMAT " _covered[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_covered[ind].start()), ind, p2i(_covered[ind].last()));
  log_trace(gc, barrier)(" _committed[%d].start(): " INTPTR_FORMAT " _committed[%d].last(): " INTPTR_FORMAT,
                         ind, p2i(_committed[ind].start()), ind, p2i(_committed[ind].last()));
  log_trace(gc, barrier)(" byte_for(start): " INTPTR_FORMAT " byte_for(last): " INTPTR_FORMAT,
                         p2i(byte_for(_covered[ind].start())), p2i(byte_for(_covered[ind].last())));
  log_trace(gc, barrier)(" addr_for(start): " INTPTR_FORMAT " addr_for(last): " INTPTR_FORMAT,
                         p2i(addr_for((CardValue*) _committed[ind].start())), p2i(addr_for((CardValue*) _committed[ind].last())));

  // Touch the last card of the covered region to show that it
  // is committed (or SEGV).
  debug_only((void) (*byte_for(_covered[ind].last()));)
  debug_only(verify_guard();)
}

// Note that these versions are precise!  The scanning code has to handle the
// fact that the write barrier may be either precise or imprecise.
void CardTable::dirty_MemRegion(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
  CardValue* cur  = byte_for(mr.start());
  CardValue* last = byte_after(mr.last());
  while (cur < last) {
    *cur = dirty_card;
    cur++;
  }
}
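
// For illustration (hypothetical addresses, assuming 512-byte cards):
// dirtying [0x80000100, 0x80000500) marks the entries for the three cards
// covering [0x80000000, 0x80000600) -- "precise" in the sense that exactly
// the cards spanned by mr are dirtied, and nothing beyond them.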

void CardTable::clear_MemRegion(MemRegion mr) {
  // Be conservative: only clean cards entirely contained within the
  // region.
  CardValue* cur;
  if (mr.start() == _whole_heap.start()) {
    cur = byte_for(mr.start());
  } else {
    assert(mr.start() > _whole_heap.start(), "mr is not covered.");
    cur = byte_after(mr.start() - 1);
  }
  CardValue* last = byte_after(mr.last());
  memset(cur, clean_card, pointer_delta(last, cur, sizeof(CardValue)));
}
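
// For illustration (hypothetical addresses, assuming 512-byte cards):
// clearing [0x80000100, 0x80000500) starts at byte_after(0x800000f8), the
// entry for the first card that begins at or after mr.start() (the card at
// 0x80000200), so a leading card only partially covered by mr keeps
// whatever mark it already carries.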

void CardTable::clear(MemRegion mr) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) clear_MemRegion(mri);
  }
}

void CardTable::dirty(MemRegion mr) {
  CardValue* first = byte_for(mr.start());
  CardValue* last  = byte_after(mr.last());
  memset(first, dirty_card, last-first);
}

// Unlike several other card table methods, dirty_card_iterate()
// iterates over dirty card ranges in increasing address order.
void CardTable::dirty_card_iterate(MemRegion mr, MemRegionClosure* cl) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue *cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          cl->do_MemRegion(cur_cards);
        }
      }
    }
  }
}
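
// A hypothetical caller, for illustration only (CountDirtyClosure is not
// part of this file; MemRegionClosure is assumed to be the usual
// single-method closure type):
//
//   class CountDirtyClosure : public MemRegionClosure {
//     size_t _words;
//    public:
//     CountDirtyClosure() : _words(0) {}
//     void do_MemRegion(MemRegion mr) { _words += mr.word_size(); }
//     size_t words() const { return _words; }
//   };
//
//   CountDirtyClosure cl;
//   ct->dirty_card_iterate(some_region, &cl);
//   // cl.words() now totals the sizes of the maximal dirty ranges visited.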

MemRegion CardTable::dirty_card_range_after_reset(MemRegion mr,
                                                  bool reset,
                                                  int reset_val) {
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) {
      CardValue* cur_entry, *next_entry, *limit;
      for (cur_entry = byte_for(mri.start()), limit = byte_for(mri.last());
           cur_entry <= limit;
           cur_entry  = next_entry) {
        next_entry = cur_entry + 1;
        if (*cur_entry == dirty_card) {
          size_t dirty_cards;
          // Accumulate maximal dirty card range, starting at cur_entry
          for (dirty_cards = 1;
               next_entry <= limit && *next_entry == dirty_card;
               dirty_cards++, next_entry++);
          MemRegion cur_cards(addr_for(cur_entry),
                              dirty_cards*card_size_in_words);
          if (reset) {
            for (size_t c = 0; c < dirty_cards; c++) {
              cur_entry[c] = reset_val;
            }
          }
          return cur_cards;
        }
      }
    }
  }
  return MemRegion(mr.end(), mr.end());
}

uintx CardTable::ct_max_alignment_constraint() {
  return card_size * os::vm_page_size();
}
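
// A reading of the constraint above (an inference, not stated in this
// file): one committed page of the byte map holds vm_page_size() card
// entries, each covering card_size bytes of heap, so a page of card table
// describes card_size * vm_page_size() bytes of heap; aligning heap
// boundaries to that quantity keeps every card-table page on one side of a
// boundary.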

void CardTable::verify_guard() {
  // For product build verification
  guarantee(_byte_map[_guard_index] == last_card,
            "card table guard has been modified");
}

void CardTable::invalidate(MemRegion mr) {
  assert(align_down(mr.start(), HeapWordSize) == mr.start(), "Unaligned start");
  assert(align_up  (mr.end(),   HeapWordSize) == mr.end(),   "Unaligned end");
  for (int i = 0; i < _cur_covered_regions; i++) {
    MemRegion mri = mr.intersection(_covered[i]);
    if (!mri.is_empty()) dirty_MemRegion(mri);
  }
}

void CardTable::verify() {
  verify_guard();
}
475 | |
476 | #ifndef PRODUCT |
477 | void CardTable::verify_region(MemRegion mr, CardValue val, bool val_equals) { |
478 | CardValue* start = byte_for(mr.start()); |
479 | CardValue* end = byte_for(mr.last()); |
480 | bool failures = false; |
481 | for (CardValue* curr = start; curr <= end; ++curr) { |
482 | CardValue curr_val = *curr; |
483 | bool failed = (val_equals) ? (curr_val != val) : (curr_val == val); |
484 | if (failed) { |
485 | if (!failures) { |
486 | log_error(gc, verify)("== CT verification failed: [" INTPTR_FORMAT "," INTPTR_FORMAT "]" , p2i(start), p2i(end)); |
487 | log_error(gc, verify)("== %sexpecting value: %d" , (val_equals) ? "" : "not " , val); |
488 | failures = true; |
489 | } |
490 | log_error(gc, verify)("== card " PTR_FORMAT " [" PTR_FORMAT "," PTR_FORMAT "], val: %d" , |
491 | p2i(curr), p2i(addr_for(curr)), |
492 | p2i((HeapWord*) (((size_t) addr_for(curr)) + card_size)), |
493 | (int) curr_val); |
494 | } |
495 | } |
496 | guarantee(!failures, "there should not have been any failures" ); |
497 | } |

void CardTable::verify_not_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, false /* val_equals */);
}

void CardTable::verify_dirty_region(MemRegion mr) {
  verify_region(mr, dirty_card, true /* val_equals */);
}
#endif

void CardTable::print_on(outputStream* st) const {
  st->print_cr("Card table byte_map: [" INTPTR_FORMAT "," INTPTR_FORMAT "] _byte_map_base: " INTPTR_FORMAT,
               p2i(_byte_map), p2i(_byte_map + _byte_map_size), p2i(_byte_map_base));
}