/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "memory/iterator.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"

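// Per-granule bitmap with one bit for each possible small-alignment object
// start within the granule. A set bit means the corresponding object has
// already been pushed onto the visit stack.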
class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
private:
  CHeapBitMap _map;

public:
  ZHeapIteratorBitMap(size_t size_in_bits) :
      _map(size_in_bits) {}

  bool try_set_bit(size_t index) {
    if (_map.at(index)) {
      return false;
    }

    _map.set_bit(index);
    return true;
  }
};

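// Root closure that loads each root oop and pushes it onto the visit stack.
// The template parameters select the load: weak roots are loaded as phantom
// references without keeping them alive, concurrently scanned strong roots
// use a no-keepalive native access load, and remaining roots are loaded raw.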
template <bool Concurrent, bool Weak>
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
private:
  ZHeapIterator* const _iter;

  oop load_oop(oop* p) {
    if (Weak) {
      return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
    }

    if (Concurrent) {
      return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
    }

    return RawAccess<>::oop_load(p);
  }

public:
  ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
      _iter(iter) {}

  virtual void do_oop(oop* p) {
    const oop obj = load_oop(p);
    _iter->push(obj);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

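// Field closure that loads each oop field of the visited object and pushes it
// onto the visit stack. When VisitReferents is true, referent fields of
// reference objects are visited as well, and fields are loaded through
// ON_UNKNOWN_OOP_REF using the holder object and field offset so that the
// referent gets the appropriate treatment. Class loader data is claimed
// with _claim_other while visiting metadata.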
template <bool VisitReferents>
class ZHeapIteratorOopClosure : public ClaimMetadataVisitingOopIterateClosure {
private:
  ZHeapIterator* const _iter;
  const oop            _base;

  oop load_oop(oop* p) {
    if (VisitReferents) {
      return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
    }

    return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
  }

public:
  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
      ClaimMetadataVisitingOopIterateClosure(ClassLoaderData::_claim_other),
      _iter(iter),
      _base(base) {}

  virtual ReferenceIterationMode reference_iteration_mode() {
    return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
  }

  virtual void do_oop(oop* p) {
    const oop obj = load_oop(p);
    _iter->push(obj);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }

#ifdef ASSERT
  virtual bool should_verify_oops() {
    return false;
  }
#endif
};

ZHeapIterator::ZHeapIterator() :
    _visit_stack(),
    _visit_map() {}

ZHeapIterator::~ZHeapIterator() {
  ZVisitMapIterator iter(&_visit_map);
  for (ZHeapIteratorBitMap* map; iter.next(&map);) {
    delete map;
  }
  ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);
}

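// Each object is tracked by a single bit in the bitmap of its granule. The
// maximum number of bits needed per granule is the granule size divided by
// the smallest object alignment, and an object's bit index is its
// alignment-shifted offset within the granule.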
static size_t object_index_max() {
  return ZGranuleSize >> ZObjectAlignmentSmallShift;
}

static size_t object_index(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  const uintptr_t offset = ZAddress::offset(addr);
  const uintptr_t mask = ZGranuleSize - 1;
  return (offset & mask) >> ZObjectAlignmentSmallShift;
}

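// Returns the visit bitmap for the granule containing obj, allocating it
// lazily the first time an object in that granule is encountered.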
ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  ZHeapIteratorBitMap* map = _visit_map.get(addr);
  if (map == NULL) {
    map = new ZHeapIteratorBitMap(object_index_max());
    _visit_map.put(addr, map);
  }

  return map;
}

void ZHeapIterator::push(oop obj) {
  if (obj == NULL) {
    // Ignore
    return;
  }

  ZHeapIteratorBitMap* const map = object_map(obj);
  const size_t index = object_index(obj);
  if (!map->try_set_bit(index)) {
    // Already pushed
    return;
  }

  // Push
  _visit_stack.push(obj);
}

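// Instantiates the given roots iterator and pushes all oops it reports,
// using a root closure parameterized for concurrent and/or weak processing.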
template <typename RootsIterator, bool Concurrent, bool Weak>
void ZHeapIterator::push_roots() {
  ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
  RootsIterator roots;
  roots.oops_do(&cl);
}

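// Pushes all oop fields of obj, optionally including the referent fields of
// reference objects.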
template <bool VisitReferents>
void ZHeapIterator::push_fields(oop obj) {
  ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
  obj->oop_iterate(&cl);
}

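// Concurrent roots iterator that claims class loader data with _claim_other,
// keeping the claiming done for heap iteration separate from the claiming
// done by the GC's own marking. The claimed marks are cleared again in the
// ZHeapIterator destructor.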
class ZHeapIterateConcurrentRootsIterator : public ZConcurrentRootsIterator {
public:
  ZHeapIterateConcurrentRootsIterator() :
      ZConcurrentRootsIterator(ClassLoaderData::_claim_other) {}
};

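// Visits all objects reachable from the strong roots, and optionally also
// from the weak roots. Roots are pushed first, then the visit stack is
// drained: each popped object is passed to the closure and its fields are
// pushed in turn, giving a depth-first traversal of the object graph.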
template <bool VisitWeaks>
void ZHeapIterator::objects_do(ObjectClosure* cl) {
  ZStatTimerDisable disable;

  // Push roots to visit
  push_roots<ZRootsIterator, false /* Concurrent */, false /* Weak */>();
  push_roots<ZHeapIterateConcurrentRootsIterator, true /* Concurrent */, false /* Weak */>();
  if (VisitWeaks) {
    push_roots<ZWeakRootsIterator, false /* Concurrent */, true /* Weak */>();
    push_roots<ZConcurrentWeakRootsIterator, true /* Concurrent */, true /* Weak */>();
  }

  // Drain stack
  while (!_visit_stack.is_empty()) {
    const oop obj = _visit_stack.pop();

    // Visit object
    cl->do_object(obj);

    // Push fields to visit
    push_fields<VisitWeaks>(obj);
  }
}

void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_weaks) {
  if (visit_weaks) {
    objects_do<true /* VisitWeaks */>(cl);
  } else {
    objects_do<false /* VisitWeaks */>(cl);
  }
}