1 | /* |
2 | * Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #include "precompiled.hpp" |
26 | #include "code/nmethod.hpp" |
27 | #include "gc/g1/g1Allocator.inline.hpp" |
28 | #include "gc/g1/g1CollectedHeap.inline.hpp" |
29 | #include "gc/g1/g1ConcurrentMarkThread.hpp" |
30 | #include "gc/g1/g1HeapVerifier.hpp" |
31 | #include "gc/g1/g1Policy.hpp" |
32 | #include "gc/g1/g1RemSet.hpp" |
33 | #include "gc/g1/g1RootProcessor.hpp" |
34 | #include "gc/g1/heapRegion.inline.hpp" |
35 | #include "gc/g1/heapRegionRemSet.hpp" |
36 | #include "gc/g1/g1StringDedup.hpp" |
37 | #include "logging/log.hpp" |
38 | #include "logging/logStream.hpp" |
39 | #include "memory/iterator.inline.hpp" |
40 | #include "memory/resourceArea.hpp" |
41 | #include "memory/universe.hpp" |
42 | #include "oops/access.inline.hpp" |
43 | #include "oops/compressedOops.inline.hpp" |
44 | #include "oops/oop.inline.hpp" |
45 | #include "runtime/handles.inline.hpp" |
46 | |
47 | int G1HeapVerifier::_enabled_verification_types = G1HeapVerifier::G1VerifyAll; |
48 | |
49 | class VerifyRootsClosure: public OopClosure { |
50 | private: |
51 | G1CollectedHeap* _g1h; |
52 | VerifyOption _vo; |
53 | bool _failures; |
54 | public: |
55 | // _vo == UsePrevMarking -> use "prev" marking information, |
56 | // _vo == UseNextMarking -> use "next" marking information, |
57 | // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS |
58 | VerifyRootsClosure(VerifyOption vo) : |
59 | _g1h(G1CollectedHeap::heap()), |
60 | _vo(vo), |
61 | _failures(false) { } |
62 | |
63 | bool failures() { return _failures; } |
64 | |
65 | template <class T> void do_oop_work(T* p) { |
66 | T heap_oop = RawAccess<>::oop_load(p); |
67 | if (!CompressedOops::is_null(heap_oop)) { |
68 | oop obj = CompressedOops::decode_not_null(heap_oop); |
69 | if (_g1h->is_obj_dead_cond(obj, _vo)) { |
70 | Log(gc, verify) log; |
71 | log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj)); |
72 | ResourceMark rm; |
73 | LogStream ls(log.error()); |
74 | obj->print_on(&ls); |
75 | _failures = true; |
76 | } |
77 | } |
78 | } |
79 | |
80 | void do_oop(oop* p) { do_oop_work(p); } |
81 | void do_oop(narrowOop* p) { do_oop_work(p); } |
82 | }; |
83 | |
84 | class G1VerifyCodeRootOopClosure: public OopClosure { |
85 | G1CollectedHeap* _g1h; |
86 | OopClosure* _root_cl; |
87 | nmethod* _nm; |
88 | VerifyOption _vo; |
89 | bool _failures; |
90 | |
91 | template <class T> void do_oop_work(T* p) { |
92 | // First verify that this root is live |
93 | _root_cl->do_oop(p); |
94 | |
95 | if (!G1VerifyHeapRegionCodeRoots) { |
      // We're not verifying the code roots attached to heap regions.
97 | return; |
98 | } |
99 | |
100 | // Don't check the code roots during marking verification in a full GC |
101 | if (_vo == VerifyOption_G1UseFullMarking) { |
102 | return; |
103 | } |
104 | |
105 | // Now verify that the current nmethod (which contains p) is |
106 | // in the code root list of the heap region containing the |
107 | // object referenced by p. |
108 | |
109 | T heap_oop = RawAccess<>::oop_load(p); |
110 | if (!CompressedOops::is_null(heap_oop)) { |
111 | oop obj = CompressedOops::decode_not_null(heap_oop); |
112 | |
113 | // Now fetch the region containing the object |
114 | HeapRegion* hr = _g1h->heap_region_containing(obj); |
115 | HeapRegionRemSet* hrrs = hr->rem_set(); |
116 | // Verify that the strong code root list for this region |
117 | // contains the nmethod |
118 | if (!hrrs->strong_code_roots_list_contains(_nm)) { |
119 | log_error(gc, verify)("Code root location " PTR_FORMAT " " |
120 | "from nmethod " PTR_FORMAT " not in strong " |
121 | "code roots for region [" PTR_FORMAT "," PTR_FORMAT ")" , |
122 | p2i(p), p2i(_nm), p2i(hr->bottom()), p2i(hr->end())); |
123 | _failures = true; |
124 | } |
125 | } |
126 | } |
127 | |
128 | public: |
129 | G1VerifyCodeRootOopClosure(G1CollectedHeap* g1h, OopClosure* root_cl, VerifyOption vo): |
130 | _g1h(g1h), _root_cl(root_cl), _nm(NULL), _vo(vo), _failures(false) {} |
131 | |
132 | void do_oop(oop* p) { do_oop_work(p); } |
133 | void do_oop(narrowOop* p) { do_oop_work(p); } |
134 | |
135 | void set_nmethod(nmethod* nm) { _nm = nm; } |
136 | bool failures() { return _failures; } |
137 | }; |
138 | |
139 | class G1VerifyCodeRootBlobClosure: public CodeBlobClosure { |
140 | G1VerifyCodeRootOopClosure* _oop_cl; |
141 | |
142 | public: |
143 | G1VerifyCodeRootBlobClosure(G1VerifyCodeRootOopClosure* oop_cl): |
144 | _oop_cl(oop_cl) {} |
145 | |
146 | void do_code_blob(CodeBlob* cb) { |
147 | nmethod* nm = cb->as_nmethod_or_null(); |
148 | if (nm != NULL) { |
149 | _oop_cl->set_nmethod(nm); |
150 | nm->oops_do(_oop_cl); |
151 | } |
152 | } |
153 | }; |
154 | |
155 | class YoungRefCounterClosure : public OopClosure { |
156 | G1CollectedHeap* _g1h; |
157 | int _count; |
158 | public: |
159 | YoungRefCounterClosure(G1CollectedHeap* g1h) : _g1h(g1h), _count(0) {} |
160 | void do_oop(oop* p) { if (_g1h->is_in_young(*p)) { _count++; } } |
161 | void do_oop(narrowOop* p) { ShouldNotReachHere(); } |
162 | |
163 | int count() { return _count; } |
  void reset_count() { _count = 0; }
165 | }; |
166 | |
167 | class VerifyCLDClosure: public CLDClosure { |
168 | YoungRefCounterClosure _young_ref_counter_closure; |
169 | OopClosure *_oop_closure; |
170 | public: |
171 | VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {} |
172 | void do_cld(ClassLoaderData* cld) { |
173 | cld->oops_do(_oop_closure, ClassLoaderData::_claim_none); |
174 | |
175 | _young_ref_counter_closure.reset_count(); |
176 | cld->oops_do(&_young_ref_counter_closure, ClassLoaderData::_claim_none); |
177 | if (_young_ref_counter_closure.count() > 0) { |
      guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT " has %d young refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
179 | } |
180 | } |
181 | }; |
182 | |
183 | class VerifyLivenessOopClosure: public BasicOopIterateClosure { |
184 | G1CollectedHeap* _g1h; |
185 | VerifyOption _vo; |
186 | public: |
187 | VerifyLivenessOopClosure(G1CollectedHeap* g1h, VerifyOption vo): |
188 | _g1h(g1h), _vo(vo) |
189 | { } |
190 | void do_oop(narrowOop *p) { do_oop_work(p); } |
191 | void do_oop( oop *p) { do_oop_work(p); } |
192 | |
193 | template <class T> void do_oop_work(T *p) { |
194 | oop obj = RawAccess<>::oop_load(p); |
195 | guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo), |
196 | "Dead object referenced by a not dead object" ); |
197 | } |
198 | }; |
199 | |
200 | class VerifyObjsInRegionClosure: public ObjectClosure { |
201 | private: |
202 | G1CollectedHeap* _g1h; |
203 | size_t _live_bytes; |
204 | HeapRegion *_hr; |
205 | VerifyOption _vo; |
206 | public: |
207 | // _vo == UsePrevMarking -> use "prev" marking information, |
208 | // _vo == UseNextMarking -> use "next" marking information, |
209 | // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS. |
210 | VerifyObjsInRegionClosure(HeapRegion *hr, VerifyOption vo) |
211 | : _live_bytes(0), _hr(hr), _vo(vo) { |
212 | _g1h = G1CollectedHeap::heap(); |
213 | } |
214 | void do_object(oop o) { |
215 | VerifyLivenessOopClosure isLive(_g1h, _vo); |
216 | assert(o != NULL, "Huh?" ); |
217 | if (!_g1h->is_obj_dead_cond(o, _vo)) { |
218 | // If the object is alive according to the full gc mark, |
219 | // then verify that the marking information agrees. |
220 | // Note we can't verify the contra-positive of the |
221 | // above: if the object is dead (according to the mark |
222 | // word), it may not be marked, or may have been marked |
      // but has since become dead, or may have been allocated
224 | // since the last marking. |
225 | if (_vo == VerifyOption_G1UseFullMarking) { |
        guarantee(!_g1h->is_obj_dead(o), "Full GC marking and concurrent mark mismatch");
227 | } |
228 | |
229 | o->oop_iterate(&isLive); |
230 | if (!_hr->obj_allocated_since_prev_marking(o)) { |
        size_t obj_size = o->size(); // size in words; size_t keeps the byte count below from overflowing
232 | _live_bytes += (obj_size * HeapWordSize); |
233 | } |
234 | } |
235 | } |
236 | size_t live_bytes() { return _live_bytes; } |
237 | }; |
238 | |
239 | class VerifyArchiveOopClosure: public BasicOopIterateClosure { |
240 | HeapRegion* _hr; |
241 | public: |
242 | VerifyArchiveOopClosure(HeapRegion *hr) |
243 | : _hr(hr) { } |
244 | void do_oop(narrowOop *p) { do_oop_work(p); } |
245 | void do_oop( oop *p) { do_oop_work(p); } |
246 | |
247 | template <class T> void do_oop_work(T *p) { |
248 | oop obj = RawAccess<>::oop_load(p); |
249 | |
250 | if (_hr->is_open_archive()) { |
251 | guarantee(obj == NULL || G1ArchiveAllocator::is_archived_object(obj), |
252 | "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT, |
253 | p2i(p), p2i(obj)); |
254 | } else { |
      assert(_hr->is_closed_archive(), "should be closed archive region");
256 | guarantee(obj == NULL || G1ArchiveAllocator::is_closed_archive_object(obj), |
257 | "Archive object at " PTR_FORMAT " references a non-archive object at " PTR_FORMAT, |
258 | p2i(p), p2i(obj)); |
259 | } |
260 | } |
261 | }; |
262 | |
263 | class VerifyObjectInArchiveRegionClosure: public ObjectClosure { |
264 | HeapRegion* _hr; |
265 | public: |
266 | VerifyObjectInArchiveRegionClosure(HeapRegion *hr, bool verbose) |
267 | : _hr(hr) { } |
268 | // Verify that all object pointers are to archive regions. |
269 | void do_object(oop o) { |
270 | VerifyArchiveOopClosure checkOop(_hr); |
271 | assert(o != NULL, "Should not be here for NULL oops" ); |
272 | o->oop_iterate(&checkOop); |
273 | } |
274 | }; |
275 | |
276 | // Should be only used at CDS dump time |
277 | class VerifyReadyForArchivingRegionClosure : public HeapRegionClosure { |
278 | bool _seen_free; |
279 | bool _has_holes; |
280 | bool _has_unexpected_holes; |
281 | bool _has_humongous; |
282 | public: |
  bool has_holes() { return _has_holes; }
  bool has_unexpected_holes() { return _has_unexpected_holes; }
  bool has_humongous() { return _has_humongous; }
286 | |
  VerifyReadyForArchivingRegionClosure() :
    HeapRegionClosure(),
    _seen_free(false),
    _has_holes(false),
    _has_unexpected_holes(false),
    _has_humongous(false) { }
293 | virtual bool do_heap_region(HeapRegion* hr) { |
294 | const char* hole = "" ; |
295 | |
296 | if (hr->is_free()) { |
297 | _seen_free = true; |
298 | } else { |
299 | if (_seen_free) { |
300 | _has_holes = true; |
301 | if (hr->is_humongous()) { |
302 | hole = " hole" ; |
303 | } else { |
304 | _has_unexpected_holes = true; |
305 | hole = " hole **** unexpected ****" ; |
306 | } |
307 | } |
308 | } |
309 | if (hr->is_humongous()) { |
310 | _has_humongous = true; |
311 | } |
    log_info(gc, region, cds)("HeapRegion " INTPTR_FORMAT " %s%s", p2i(hr->bottom()), hr->get_type_str(), hole);
313 | return false; |
314 | } |
315 | }; |
316 | |
317 | // We want all used regions to be moved to the bottom-end of the heap, so we have |
318 | // a contiguous range of free regions at the top end of the heap. This way, we can |
319 | // avoid fragmentation while allocating the archive regions. |
320 | // |
321 | // Before calling this, a full GC should have been executed with a single worker thread, |
322 | // so that no old regions would be moved to the middle of the heap. |
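//
// For illustration only (hypothetical 8-region layouts; O = used/old,
// F = free, H = humongous), the closure accepts
//   [O][O][O][F][F][F][F][F]   -- all free regions at the top end
// and reports
//   [O][F][H][H][F][F][F][F]   -- hole before a humongous region (expected)
//   [O][F][O][F][F][F][F][F]   -- hole before an old region (unexpected)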
323 | void G1HeapVerifier::verify_ready_for_archiving() { |
324 | VerifyReadyForArchivingRegionClosure cl; |
325 | G1CollectedHeap::heap()->heap_region_iterate(&cl); |
326 | if (cl.has_holes()) { |
327 | log_warning(gc, verify)("All free regions should be at the top end of the heap, but" |
328 | " we found holes. This is probably caused by (unmovable) humongous" |
329 | " allocations, and may lead to fragmentation while" |
330 | " writing archive heap memory regions." ); |
331 | } |
332 | if (cl.has_humongous()) { |
333 | log_warning(gc, verify)("(Unmovable) humongous regions have been found and" |
334 | " may lead to fragmentation while" |
335 | " writing archive heap memory regions." ); |
336 | } |
  assert(!cl.has_unexpected_holes(), "all holes should have been caused by humongous regions");
338 | } |
339 | |
340 | class VerifyArchivePointerRegionClosure: public HeapRegionClosure { |
341 | virtual bool do_heap_region(HeapRegion* r) { |
342 | if (r->is_archive()) { |
343 | VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false); |
344 | r->object_iterate(&verify_oop_pointers); |
345 | } |
346 | return false; |
347 | } |
348 | }; |
349 | |
350 | void G1HeapVerifier::verify_archive_regions() { |
351 | G1CollectedHeap* g1h = G1CollectedHeap::heap(); |
352 | VerifyArchivePointerRegionClosure cl; |
353 | g1h->heap_region_iterate(&cl); |
354 | } |
355 | |
356 | class VerifyRegionClosure: public HeapRegionClosure { |
357 | private: |
358 | bool _par; |
359 | VerifyOption _vo; |
360 | bool _failures; |
361 | public: |
362 | // _vo == UsePrevMarking -> use "prev" marking information, |
363 | // _vo == UseNextMarking -> use "next" marking information, |
364 | // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS |
365 | VerifyRegionClosure(bool par, VerifyOption vo) |
366 | : _par(par), |
367 | _vo(vo), |
368 | _failures(false) {} |
369 | |
370 | bool failures() { |
371 | return _failures; |
372 | } |
373 | |
374 | bool do_heap_region(HeapRegion* r) { |
    guarantee(!r->has_index_in_opt_cset(), "Region %u still has opt collection set index %u", r->hrm_index(), r->index_in_opt_cset());
    guarantee(!r->is_young() || r->rem_set()->is_complete(), "Remembered set for Young region %u must be complete, is %s", r->hrm_index(), r->rem_set()->get_state_str());
    // Humongous and old regions might be of any state, so can't check here.
    guarantee(!r->is_free() || !r->rem_set()->is_tracked(), "Remembered set for free region %u must be untracked, is %s", r->hrm_index(), r->rem_set()->get_state_str());
379 | // Verify that the continues humongous regions' remembered set state matches the |
380 | // one from the starts humongous region. |
381 | if (r->is_continues_humongous()) { |
382 | if (r->rem_set()->get_state_str() != r->humongous_start_region()->rem_set()->get_state_str()) { |
383 | log_error(gc, verify)("Remset states differ: Region %u (%s) remset %s with starts region %u (%s) remset %s" , |
384 | r->hrm_index(), |
385 | r->get_short_type_str(), |
386 | r->rem_set()->get_state_str(), |
387 | r->humongous_start_region()->hrm_index(), |
388 | r->humongous_start_region()->get_short_type_str(), |
389 | r->humongous_start_region()->rem_set()->get_state_str()); |
390 | _failures = true; |
391 | } |
392 | } |
393 | // For archive regions, verify there are no heap pointers to |
394 | // non-pinned regions. For all others, verify liveness info. |
395 | if (r->is_closed_archive()) { |
396 | VerifyObjectInArchiveRegionClosure verify_oop_pointers(r, false); |
397 | r->object_iterate(&verify_oop_pointers); |
      return false; // continue verifying the remaining regions
399 | } else if (r->is_open_archive()) { |
400 | VerifyObjsInRegionClosure verify_open_archive_oop(r, _vo); |
401 | r->object_iterate(&verify_open_archive_oop); |
      return false; // continue verifying the remaining regions
403 | } else if (!r->is_continues_humongous()) { |
404 | bool failures = false; |
405 | r->verify(_vo, &failures); |
406 | if (failures) { |
407 | _failures = true; |
408 | } else if (!r->is_starts_humongous()) { |
409 | VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo); |
410 | r->object_iterate(¬_dead_yet_cl); |
411 | if (_vo != VerifyOption_G1UseNextMarking) { |
412 | if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) { |
413 | log_error(gc, verify)("[" PTR_FORMAT "," PTR_FORMAT "] max_live_bytes " SIZE_FORMAT " < calculated " SIZE_FORMAT, |
414 | p2i(r->bottom()), p2i(r->end()), r->max_live_bytes(), not_dead_yet_cl.live_bytes()); |
415 | _failures = true; |
416 | } |
417 | } else { |
418 | // When vo == UseNextMarking we cannot currently do a sanity |
419 | // check on the live bytes as the calculation has not been |
420 | // finalized yet. |
421 | } |
422 | } |
423 | } |
    return false; // continue the iteration; failures are accumulated and reported via failures()
425 | } |
426 | }; |
427 | |
428 | // This is the task used for parallel verification of the heap regions |
429 | |
430 | class G1ParVerifyTask: public AbstractGangTask { |
431 | private: |
432 | G1CollectedHeap* _g1h; |
433 | VerifyOption _vo; |
434 | bool _failures; |
435 | HeapRegionClaimer _hrclaimer; |
436 | |
437 | public: |
438 | // _vo == UsePrevMarking -> use "prev" marking information, |
439 | // _vo == UseNextMarking -> use "next" marking information, |
440 | // _vo == UseFullMarking -> use "next" marking bitmap but no TAMS |
441 | G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) : |
442 | AbstractGangTask("Parallel verify task" ), |
443 | _g1h(g1h), |
444 | _vo(vo), |
445 | _failures(false), |
446 | _hrclaimer(g1h->workers()->active_workers()) {} |
447 | |
448 | bool failures() { |
449 | return _failures; |
450 | } |
451 | |
452 | void work(uint worker_id) { |
453 | HandleMark hm; |
454 | VerifyRegionClosure blk(true, _vo); |
455 | _g1h->heap_region_par_iterate_from_worker_offset(&blk, &_hrclaimer, worker_id); |
456 | if (blk.failures()) { |
457 | _failures = true; |
458 | } |
459 | } |
460 | }; |
461 | |
462 | void G1HeapVerifier::enable_verification_type(G1VerifyType type) { |
  // The first call replaces the G1VerifyAll default; later calls OR in additional types.
464 | if (_enabled_verification_types == G1VerifyAll) { |
465 | _enabled_verification_types = type; |
466 | } else { |
467 | _enabled_verification_types |= type; |
468 | } |
469 | } |
470 | |
471 | bool G1HeapVerifier::should_verify(G1VerifyType type) { |
472 | return (_enabled_verification_types & type) == type; |
473 | } |
474 | |
475 | void G1HeapVerifier::verify(VerifyOption vo) { |
  if (!SafepointSynchronize::is_at_safepoint()) {
    log_info(gc, verify)("Skipping verification. Not at safepoint.");
    return;
  }
479 | |
480 | assert(Thread::current()->is_VM_thread(), |
481 | "Expected to be executed serially by the VM thread at this point" ); |
482 | |
483 | log_debug(gc, verify)("Roots" ); |
484 | VerifyRootsClosure rootsCl(vo); |
485 | VerifyCLDClosure cldCl(_g1h, &rootsCl); |
486 | |
487 | // We apply the relevant closures to all the oops in the |
488 | // system dictionary, class loader data graph, the string table |
489 | // and the nmethods in the code cache. |
490 | G1VerifyCodeRootOopClosure codeRootsCl(_g1h, &rootsCl, vo); |
491 | G1VerifyCodeRootBlobClosure blobsCl(&codeRootsCl); |
492 | |
493 | { |
494 | G1RootProcessor root_processor(_g1h, 1); |
495 | root_processor.process_all_roots(&rootsCl, &cldCl, &blobsCl); |
496 | } |
497 | |
498 | bool failures = rootsCl.failures() || codeRootsCl.failures(); |
499 | |
500 | if (!_g1h->policy()->collector_state()->in_full_gc()) { |
501 | // If we're verifying during a full GC then the region sets |
502 | // will have been torn down at the start of the GC. Therefore |
503 | // verifying the region sets will fail. So we only verify |
504 | // the region sets when not in a full GC. |
505 | log_debug(gc, verify)("HeapRegionSets" ); |
506 | verify_region_sets(); |
507 | } |
508 | |
509 | log_debug(gc, verify)("HeapRegions" ); |
510 | if (GCParallelVerificationEnabled && ParallelGCThreads > 1) { |
511 | |
512 | G1ParVerifyTask task(_g1h, vo); |
513 | _g1h->workers()->run_task(&task); |
514 | if (task.failures()) { |
515 | failures = true; |
516 | } |
517 | |
518 | } else { |
519 | VerifyRegionClosure blk(false, vo); |
520 | _g1h->heap_region_iterate(&blk); |
521 | if (blk.failures()) { |
522 | failures = true; |
523 | } |
524 | } |
525 | |
526 | if (G1StringDedup::is_enabled()) { |
527 | log_debug(gc, verify)("StrDedup" ); |
528 | G1StringDedup::verify(); |
529 | } |
530 | |
531 | if (failures) { |
532 | log_error(gc, verify)("Heap after failed verification (kind %d):" , vo); |
533 | // It helps to have the per-region information in the output to |
534 | // help us track down what went wrong. This is why we call |
535 | // print_extended_on() instead of print_on(). |
536 | Log(gc, verify) log; |
537 | ResourceMark rm; |
538 | LogStream ls(log.error()); |
539 | _g1h->print_extended_on(&ls); |
540 | } |
  guarantee(!failures, "there should not have been any failures");
542 | } |
543 | |
544 | // Heap region set verification |
545 | |
546 | class VerifyRegionListsClosure : public HeapRegionClosure { |
547 | private: |
548 | HeapRegionSet* _old_set; |
549 | HeapRegionSet* _archive_set; |
550 | HeapRegionSet* _humongous_set; |
551 | HeapRegionManager* _hrm; |
552 | |
553 | public: |
554 | uint _old_count; |
555 | uint _archive_count; |
556 | uint _humongous_count; |
557 | uint _free_count; |
558 | |
559 | VerifyRegionListsClosure(HeapRegionSet* old_set, |
560 | HeapRegionSet* archive_set, |
561 | HeapRegionSet* humongous_set, |
562 | HeapRegionManager* hrm) : |
563 | _old_set(old_set), _archive_set(archive_set), _humongous_set(humongous_set), _hrm(hrm), |
    _old_count(), _archive_count(), _humongous_count(), _free_count() { }
565 | |
566 | bool do_heap_region(HeapRegion* hr) { |
567 | if (hr->is_young()) { |
568 | // TODO |
569 | } else if (hr->is_humongous()) { |
      assert(hr->containing_set() == _humongous_set, "Heap region %u is humongous but not in humongous set.", hr->hrm_index());
      _humongous_count++;
    } else if (hr->is_empty()) {
      assert(_hrm->is_free(hr), "Heap region %u is empty but not on the free list.", hr->hrm_index());
      _free_count++;
    } else if (hr->is_archive()) {
      assert(hr->containing_set() == _archive_set, "Heap region %u is archive but not in the archive set.", hr->hrm_index());
      _archive_count++;
    } else if (hr->is_old()) {
      assert(hr->containing_set() == _old_set, "Heap region %u is old but not in the old set.", hr->hrm_index());
      _old_count++;
    } else {
      // There are no other valid region types. Check for one invalid
      // one we can identify: pinned without old or humongous set.
      assert(!hr->is_pinned(), "Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index());
585 | ShouldNotReachHere(); |
586 | } |
587 | return false; |
588 | } |
589 | |
590 | void verify_counts(HeapRegionSet* old_set, HeapRegionSet* archive_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) { |
    guarantee(old_set->length() == _old_count, "Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count);
    guarantee(archive_set->length() == _archive_count, "Archive set count mismatch. Expected %u, actual %u.", archive_set->length(), _archive_count);
    guarantee(humongous_set->length() == _humongous_count, "Humongous set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count);
    guarantee(free_list->num_free_regions() == _free_count, "Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count);
595 | } |
596 | }; |
597 | |
598 | void G1HeapVerifier::verify_region_sets() { |
599 | assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */); |
600 | |
601 | // First, check the explicit lists. |
602 | _g1h->_hrm->verify(); |
603 | |
  // Then, make sure that the region accounting in the lists is
605 | // consistent with what we see in the heap. |
606 | |
607 | VerifyRegionListsClosure cl(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm); |
608 | _g1h->heap_region_iterate(&cl); |
609 | cl.verify_counts(&_g1h->_old_set, &_g1h->_archive_set, &_g1h->_humongous_set, _g1h->_hrm); |
610 | } |
611 | |
612 | void G1HeapVerifier::prepare_for_verify() { |
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
614 | _g1h->ensure_parsability(false); |
615 | } |
616 | } |
617 | |
618 | double G1HeapVerifier::verify(G1VerifyType type, VerifyOption vo, const char* msg) { |
619 | double verify_time_ms = 0.0; |
620 | |
621 | if (should_verify(type) && _g1h->total_collections() >= VerifyGCStartAt) { |
622 | double verify_start = os::elapsedTime(); |
623 | HandleMark hm; // Discard invalid handles created during verification |
624 | prepare_for_verify(); |
625 | Universe::verify(vo, msg); |
626 | verify_time_ms = (os::elapsedTime() - verify_start) * 1000; |
627 | } |
628 | |
629 | return verify_time_ms; |
630 | } |
631 | |
632 | void G1HeapVerifier::verify_before_gc(G1VerifyType type) { |
633 | if (VerifyBeforeGC) { |
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "Before GC");
635 | _g1h->phase_times()->record_verify_before_time_ms(verify_time_ms); |
636 | } |
637 | } |
638 | |
639 | void G1HeapVerifier::verify_after_gc(G1VerifyType type) { |
640 | if (VerifyAfterGC) { |
    double verify_time_ms = verify(type, VerifyOption_G1UsePrevMarking, "After GC");
642 | _g1h->phase_times()->record_verify_after_time_ms(verify_time_ms); |
643 | } |
644 | } |
645 | |
646 | |
647 | #ifndef PRODUCT |
648 | class G1VerifyCardTableCleanup: public HeapRegionClosure { |
649 | G1HeapVerifier* _verifier; |
650 | public: |
651 | G1VerifyCardTableCleanup(G1HeapVerifier* verifier) |
652 | : _verifier(verifier) { } |
653 | virtual bool do_heap_region(HeapRegion* r) { |
654 | if (r->is_survivor()) { |
655 | _verifier->verify_dirty_region(r); |
656 | } else { |
657 | _verifier->verify_not_dirty_region(r); |
658 | } |
659 | return false; |
660 | } |
661 | }; |
662 | |
663 | void G1HeapVerifier::verify_card_table_cleanup() { |
664 | if (G1VerifyCTCleanup || VerifyAfterGC) { |
665 | G1VerifyCardTableCleanup cleanup_verifier(this); |
666 | _g1h->heap_region_iterate(&cleanup_verifier); |
667 | } |
668 | } |
669 | |
670 | void G1HeapVerifier::verify_not_dirty_region(HeapRegion* hr) { |
671 | // All of the region should be clean. |
672 | G1CardTable* ct = _g1h->card_table(); |
673 | MemRegion mr(hr->bottom(), hr->end()); |
674 | ct->verify_not_dirty_region(mr); |
675 | } |
676 | |
677 | void G1HeapVerifier::verify_dirty_region(HeapRegion* hr) { |
678 | // We cannot guarantee that [bottom(),end()] is dirty. Threads |
679 | // dirty allocated blocks as they allocate them. The thread that |
680 | // retires each region and replaces it with a new one will do a |
681 | // maximal allocation to fill in [pre_dummy_top(),end()] but will |
682 | // not dirty that area (one less thing to have to do while holding |
683 | // a lock). So we can only verify that [bottom(),pre_dummy_top()] |
684 | // is dirty. |
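  //
  // Schematically (illustration only):
  //
  //   bottom                  pre_dummy_top              end
  //     |----- must be dirty -----|----- may be clean -----|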
685 | G1CardTable* ct = _g1h->card_table(); |
686 | MemRegion mr(hr->bottom(), hr->pre_dummy_top()); |
687 | if (hr->is_young()) { |
688 | ct->verify_g1_young_region(mr); |
689 | } else { |
690 | ct->verify_dirty_region(mr); |
691 | } |
692 | } |
693 | |
694 | class G1VerifyDirtyYoungListClosure : public HeapRegionClosure { |
695 | private: |
696 | G1HeapVerifier* _verifier; |
697 | public: |
698 | G1VerifyDirtyYoungListClosure(G1HeapVerifier* verifier) : HeapRegionClosure(), _verifier(verifier) { } |
699 | virtual bool do_heap_region(HeapRegion* r) { |
700 | _verifier->verify_dirty_region(r); |
701 | return false; |
702 | } |
703 | }; |
704 | |
705 | void G1HeapVerifier::verify_dirty_young_regions() { |
706 | G1VerifyDirtyYoungListClosure cl(this); |
707 | _g1h->collection_set()->iterate(&cl); |
708 | } |
709 | |
710 | bool G1HeapVerifier::verify_no_bits_over_tams(const char* bitmap_name, const G1CMBitMap* const bitmap, |
711 | HeapWord* tams, HeapWord* end) { |
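  // The invariant checked here: the bitmap must have no marks at or above
  // TAMS (top-at-mark-start), because objects allocated at or above TAMS
  // are implicitly live and are never marked on the bitmap. Schematically
  // (illustration only):
  //
  //   bottom              tams                end
  //     |-- marks allowed --|-- must be clear --|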
712 | guarantee(tams <= end, |
713 | "tams: " PTR_FORMAT " end: " PTR_FORMAT, p2i(tams), p2i(end)); |
714 | HeapWord* result = bitmap->get_next_marked_addr(tams, end); |
715 | if (result < end) { |
716 | log_error(gc, verify)("## wrong marked address on %s bitmap: " PTR_FORMAT, bitmap_name, p2i(result)); |
717 | log_error(gc, verify)("## %s tams: " PTR_FORMAT " end: " PTR_FORMAT, bitmap_name, p2i(tams), p2i(end)); |
718 | return false; |
719 | } |
720 | return true; |
721 | } |
722 | |
723 | bool G1HeapVerifier::verify_bitmaps(const char* caller, HeapRegion* hr) { |
724 | const G1CMBitMap* const prev_bitmap = _g1h->concurrent_mark()->prev_mark_bitmap(); |
725 | const G1CMBitMap* const next_bitmap = _g1h->concurrent_mark()->next_mark_bitmap(); |
726 | |
727 | HeapWord* ptams = hr->prev_top_at_mark_start(); |
728 | HeapWord* ntams = hr->next_top_at_mark_start(); |
729 | HeapWord* end = hr->end(); |
730 | |
731 | bool res_p = verify_no_bits_over_tams("prev" , prev_bitmap, ptams, end); |
732 | |
733 | bool res_n = true; |
734 | // We cannot verify the next bitmap while we are about to clear it. |
735 | if (!_g1h->collector_state()->clearing_next_bitmap()) { |
736 | res_n = verify_no_bits_over_tams("next" , next_bitmap, ntams, end); |
737 | } |
738 | if (!res_p || !res_n) { |
739 | log_error(gc, verify)("#### Bitmap verification failed for " HR_FORMAT, HR_FORMAT_PARAMS(hr)); |
740 | log_error(gc, verify)("#### Caller: %s" , caller); |
741 | return false; |
742 | } |
743 | return true; |
744 | } |
745 | |
746 | void G1HeapVerifier::check_bitmaps(const char* caller, HeapRegion* hr) { |
747 | if (!G1VerifyBitmaps) { |
748 | return; |
749 | } |
750 | |
  guarantee(verify_bitmaps(caller, hr), "bitmap verification");
752 | } |
753 | |
754 | class G1VerifyBitmapClosure : public HeapRegionClosure { |
755 | private: |
756 | const char* _caller; |
757 | G1HeapVerifier* _verifier; |
758 | bool _failures; |
759 | |
760 | public: |
761 | G1VerifyBitmapClosure(const char* caller, G1HeapVerifier* verifier) : |
762 | _caller(caller), _verifier(verifier), _failures(false) { } |
763 | |
764 | bool failures() { return _failures; } |
765 | |
766 | virtual bool do_heap_region(HeapRegion* hr) { |
767 | bool result = _verifier->verify_bitmaps(_caller, hr); |
768 | if (!result) { |
769 | _failures = true; |
770 | } |
771 | return false; |
772 | } |
773 | }; |
774 | |
775 | void G1HeapVerifier::check_bitmaps(const char* caller) { |
776 | if (!G1VerifyBitmaps) { |
777 | return; |
778 | } |
779 | |
780 | G1VerifyBitmapClosure cl(caller, this); |
781 | _g1h->heap_region_iterate(&cl); |
  guarantee(!cl.failures(), "bitmap verification");
783 | } |
784 | |
785 | class G1CheckRegionAttrTableClosure : public HeapRegionClosure { |
786 | private: |
787 | bool _failures; |
788 | |
789 | public: |
790 | G1CheckRegionAttrTableClosure() : HeapRegionClosure(), _failures(false) { } |
791 | |
792 | virtual bool do_heap_region(HeapRegion* hr) { |
793 | uint i = hr->hrm_index(); |
794 | G1HeapRegionAttr region_attr = (G1HeapRegionAttr) G1CollectedHeap::heap()->_region_attr.get_by_index(i); |
795 | if (hr->is_humongous()) { |
796 | if (hr->in_collection_set()) { |
797 | log_error(gc, verify)("## humongous region %u in CSet" , i); |
798 | _failures = true; |
799 | return true; |
800 | } |
801 | if (region_attr.is_in_cset()) { |
802 | log_error(gc, verify)("## inconsistent region attr type %s for humongous region %u" , region_attr.get_type_str(), i); |
803 | _failures = true; |
804 | return true; |
805 | } |
806 | if (hr->is_continues_humongous() && region_attr.is_humongous()) { |
807 | log_error(gc, verify)("## inconsistent region attr type %s for continues humongous region %u" , region_attr.get_type_str(), i); |
808 | _failures = true; |
809 | return true; |
810 | } |
811 | } else { |
812 | if (region_attr.is_humongous()) { |
813 | log_error(gc, verify)("## inconsistent region attr type %s for non-humongous region %u" , region_attr.get_type_str(), i); |
814 | _failures = true; |
815 | return true; |
816 | } |
817 | if (hr->in_collection_set() != region_attr.is_in_cset()) { |
818 | log_error(gc, verify)("## in CSet %d / region attr type %s inconsistency for region %u" , |
819 | hr->in_collection_set(), region_attr.get_type_str(), i); |
820 | _failures = true; |
821 | return true; |
822 | } |
823 | if (region_attr.is_in_cset()) { |
824 | if (hr->is_archive()) { |
825 | log_error(gc, verify)("## is_archive in collection set for region %u" , i); |
826 | _failures = true; |
827 | return true; |
828 | } |
829 | if (hr->is_young() != (region_attr.is_young())) { |
830 | log_error(gc, verify)("## is_young %d / region attr type %s inconsistency for region %u" , |
831 | hr->is_young(), region_attr.get_type_str(), i); |
832 | _failures = true; |
833 | return true; |
834 | } |
835 | if (hr->is_old() != (region_attr.is_old())) { |
836 | log_error(gc, verify)("## is_old %d / region attr type %s inconsistency for region %u" , |
837 | hr->is_old(), region_attr.get_type_str(), i); |
838 | _failures = true; |
839 | return true; |
840 | } |
841 | } |
842 | } |
843 | return false; |
844 | } |
845 | |
846 | bool failures() const { return _failures; } |
847 | }; |
848 | |
849 | bool G1HeapVerifier::check_region_attr_table() { |
850 | G1CheckRegionAttrTableClosure cl; |
851 | _g1h->_hrm->iterate(&cl); |
852 | return !cl.failures(); |
853 | } |
854 | #endif // PRODUCT |
855 | |