| 1 | /* |
| 2 | * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved. |
| 3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| 4 | * |
| 5 | * This code is free software; you can redistribute it and/or modify it |
| 6 | * under the terms of the GNU General Public License version 2 only, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| 12 | * version 2 for more details (a copy is included in the LICENSE file that |
| 13 | * accompanied this code). |
| 14 | * |
| 15 | * You should have received a copy of the GNU General Public License version |
| 16 | * 2 along with this work; if not, write to the Free Software Foundation, |
| 17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| 18 | * |
| 19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| 20 | * or visit www.oracle.com if you need additional information or have any |
| 21 | * questions. |
| 22 | * |
| 23 | */ |
| 24 | |
| 25 | #ifndef SHARE_MEMORY_ITERATOR_HPP |
| 26 | #define SHARE_MEMORY_ITERATOR_HPP |
| 27 | |
| 28 | #include "memory/allocation.hpp" |
| 29 | #include "memory/memRegion.hpp" |
| 30 | #include "oops/oopsHierarchy.hpp" |
| 31 | |
| 32 | class CodeBlob; |
| 33 | class nmethod; |
| 34 | class ReferenceDiscoverer; |
| 35 | class DataLayout; |
| 36 | class KlassClosure; |
| 37 | class ClassLoaderData; |
| 38 | class Symbol; |
| 39 | class Metadata; |
| 40 | |
| 41 | // The following classes are C++ `closures` for iterating over objects, roots and spaces |
| 42 | |
// Root of the closure hierarchy. Stack-allocated only (StackObj),
// so closures are never heap-managed and need no virtual destructor.
class Closure : public StackObj { };
| 44 | |
| 45 | // OopClosure is used for iterating through references to Java objects. |
// OopClosure is used for iterating through references to Java objects.
// Both overloads must be implemented: which one is invoked depends on
// whether the heap stores full-width (oop*) or compressed (narrowOop*)
// object references.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop(narrowOop* o) = 0;
};
| 51 | |
// An OopClosure that ignores every field it is applied to.
// Useful where an API requires a closure but no per-oop work is wanted.
class DoNothingClosure : public OopClosure {
 public:
  virtual void do_oop(oop* p) {}
  virtual void do_oop(narrowOop* p) {}
};
// Shared singleton instance; defined in the corresponding .cpp file.
extern DoNothingClosure do_nothing_cl;
| 58 | |
| 59 | // OopIterateClosure adds extra code to be run during oop iterations. |
| 60 | // This is needed by the GC and is extracted to a separate type to not |
| 61 | // pollute the OopClosure interface. |
// OopIterateClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class OopIterateClosure : public OopClosure {
 private:
  ReferenceDiscoverer* _ref_discoverer;

 protected:
  // rd may be NULL when no reference discovery is requested.
  OopIterateClosure(ReferenceDiscoverer* rd) : _ref_discoverer(rd) { }
  OopIterateClosure() : _ref_discoverer(NULL) { }
  // Protected, non-virtual: instances are destroyed only through the
  // concrete subclass type, never through a base pointer.
  ~OopIterateClosure() { }

  void set_ref_discoverer_internal(ReferenceDiscoverer* rd) { _ref_discoverer = rd; }

 public:
  ReferenceDiscoverer* ref_discoverer() const { return _ref_discoverer; }

  // Iteration of InstanceRefKlasses differ depending on the closure,
  // the below enum describes the different alternatives.
  enum ReferenceIterationMode {
    DO_DISCOVERY,                // Apply closure and discover references
    DO_DISCOVERED_AND_DISCOVERY, // Apply closure to discovered field and do discovery
    DO_FIELDS,                   // Apply closure to all fields
    DO_FIELDS_EXCEPT_REFERENT    // Apply closure to all fields except the referent field
  };

  // The default iteration mode is to do discovery.
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERY; }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_cld on the class loader data in class loaders.

  virtual bool do_metadata() = 0;
  virtual void do_klass(Klass* k) = 0;
  virtual void do_cld(ClassLoaderData* cld) = 0;

#ifdef ASSERT
  // Default verification of each visited oop field.
  template <typename T> void verify(T* p);

  // Can be used by subclasses to turn off the default verification of oop fields.
  virtual bool should_verify_oops() { return true; }
#endif
};
| 107 | |
| 108 | // An OopIterateClosure that can be used when there's no need to visit the Metadata. |
// An OopIterateClosure that can be used when there's no need to visit the Metadata.
// do_metadata() returns false, so the metadata callbacks must never be
// reached; they trap via ShouldNotReachHere() if they are.
class BasicOopIterateClosure : public OopIterateClosure {
 public:
  BasicOopIterateClosure(ReferenceDiscoverer* rd = NULL) : OopIterateClosure(rd) {}

  virtual bool do_metadata() { return false; }
  virtual void do_klass(Klass* k) { ShouldNotReachHere(); }
  virtual void do_cld(ClassLoaderData* cld) { ShouldNotReachHere(); }
};
| 117 | |
// Closure applied to each Klass during klass iteration.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};
| 122 | |
// Closure applied to each ClassLoaderData during CLD iteration.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};
| 127 | |
// Closure applied to each Metadata* during metadata iteration.
class MetadataClosure : public Closure {
 public:
  virtual void do_metadata(Metadata* md) = 0;
};
| 132 | |
| 133 | |
| 134 | class CLDToOopClosure : public CLDClosure { |
| 135 | OopClosure* _oop_closure; |
| 136 | int _cld_claim; |
| 137 | |
| 138 | public: |
| 139 | CLDToOopClosure(OopClosure* oop_closure, |
| 140 | int cld_claim) : |
| 141 | _oop_closure(oop_closure), |
| 142 | _cld_claim(cld_claim) {} |
| 143 | |
| 144 | void do_cld(ClassLoaderData* cld); |
| 145 | }; |
| 146 | |
// An OopIterateClosure that also visits metadata (do_metadata() is true),
// claiming each visited CLD with the given claim value so that parallel
// workers process it at most once. do_klass/do_cld are defined out of line.
class ClaimMetadataVisitingOopIterateClosure : public OopIterateClosure {
 protected:
  const int _claim;  // claim value used when visiting CLDs

 public:
  ClaimMetadataVisitingOopIterateClosure(int claim, ReferenceDiscoverer* rd = NULL) :
      OopIterateClosure(rd),
      _claim(claim) { }

  virtual bool do_metadata() { return true; }
  virtual void do_klass(Klass* k);
  virtual void do_cld(ClassLoaderData* cld);
};
| 160 | |
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
// The constructor is defined out of line; presumably it supplies the
// appropriate claim value to the base class — confirm in iterator.cpp.
class MetadataVisitingOopIterateClosure: public ClaimMetadataVisitingOopIterateClosure {
 public:
  MetadataVisitingOopIterateClosure(ReferenceDiscoverer* rd = NULL);
};
| 168 | |
// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
| 176 | |
| 177 | |
// Predicate closure over objects: answers a yes/no question per object.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
| 182 | |
| 183 | class AlwaysTrueClosure: public BoolObjectClosure { |
| 184 | public: |
| 185 | bool do_object_b(oop p) { return true; } |
| 186 | }; |
| 187 | |
| 188 | class AlwaysFalseClosure : public BoolObjectClosure { |
| 189 | public: |
| 190 | bool do_object_b(oop p) { return false; } |
| 191 | }; |
| 192 | |
| 193 | // Applies an oop closure to all ref fields in objects iterated over in an |
| 194 | // object iteration. |
| 195 | class ObjectToOopClosure: public ObjectClosure { |
| 196 | OopIterateClosure* _cl; |
| 197 | public: |
| 198 | void do_object(oop obj); |
| 199 | ObjectToOopClosure(OopIterateClosure* cl) : _cl(cl) {} |
| 200 | }; |
| 201 | |
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
// Both callbacks return a size_t; presumably the size of the object
// processed (in HeapWords) — confirm against the concrete subclasses.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Like do_object_careful, restricted to the given MemRegion.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
| 209 | |
| 210 | // The following are used in CompactibleFreeListSpace and |
| 211 | // ConcurrentMarkSweepGeneration. |
| 212 | |
// Blk closure (abstract class)
// Applied to raw heap blocks addressed by HeapWord*; returns the size
// of the block processed so the caller can advance to the next one.
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};
| 218 | |
| 219 | // A version of BlkClosure that is expected to be robust |
| 220 | // in the face of possibly uninitialized objects. |
| 221 | class BlkClosureCareful : public BlkClosure { |
| 222 | public: |
| 223 | size_t do_blk(HeapWord* addr) { |
| 224 | guarantee(false, "call do_blk_careful instead" ); |
| 225 | return 0; |
| 226 | } |
| 227 | virtual size_t do_blk_careful(HeapWord* addr) = 0; |
| 228 | }; |
| 229 | |
| 230 | // SpaceClosure is used for iterating over spaces |
| 231 | |
| 232 | class Space; |
| 233 | class CompactibleSpace; |
| 234 | |
// SpaceClosure is used for iterating over spaces.
class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};
| 240 | |
// Variant of SpaceClosure for iterating over compactible spaces only.
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};
| 246 | |
| 247 | |
| 248 | // CodeBlobClosure is used for iterating through code blobs |
| 249 | // in the code cache or on thread stacks |
| 250 | |
// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks.
class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
| 256 | |
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;        // applied to each oop embedded in a blob
  bool _fix_relocations;  // whether oop relocations are patched after visiting
 protected:
  // Applies _cl to the oops of a single nmethod (defined in iterator.cpp).
  void do_nmethod(nmethod* nm);
 public:
  // If fix_relocations(), then cl must copy objects to their new location immediately to avoid
  // patching nmethods with the old locations.
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  bool fix_relocations() const { return _fix_relocations; }
  // Named constant for readability at construction sites.
  const static bool FixRelocations = true;
};
| 273 | |
// CodeBlobToOopClosure variant used during marking.
class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}

  // Called for each code blob, but at most once per unique blob.
  virtual void do_code_blob(CodeBlob* cb);
};
| 281 | |
// Closure applied to each nmethod during nmethod iteration.
class NMethodClosure : public Closure {
 public:
  virtual void do_nmethod(nmethod* n) = 0;
};
| 286 | |
| 287 | // MonitorClosure is used for iterating over monitors in the monitors cache |
| 288 | |
| 289 | class ObjectMonitor; |
| 290 | |
// MonitorClosure is used for iterating over monitors in the monitors cache.
class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
| 296 | |
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  // NOTE(review): left non-pure as the original author found; the default
  // definition lives in the .cpp file.
  virtual void do_void();
};
| 304 | |
| 305 | |
// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptable task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
 public:
  // Polled by the iteration loop; returning true requests that the
  // iteration stop/yield at a coarse granularity.
  virtual bool should_return() = 0;

  // Yield on a fine-grain level. The check in case of not yielding should be very fast.
  virtual bool should_return_fine_grain() { return false; }
};
| 321 | |
| 322 | // Abstract closure for serializing data (read or write). |
| 323 | |
| 324 | class SerializeClosure : public Closure { |
| 325 | public: |
| 326 | // Return bool indicating whether closure implements read or write. |
| 327 | virtual bool reading() const = 0; |
| 328 | |
| 329 | // Read/write the void pointer pointed to by p. |
| 330 | virtual void do_ptr(void** p) = 0; |
| 331 | |
| 332 | // Read/write the 32-bit unsigned integer pointed to by p. |
| 333 | virtual void do_u4(u4* p) = 0; |
| 334 | |
| 335 | // Read/write the bool pointed to by p. |
| 336 | virtual void do_bool(bool* p) = 0; |
| 337 | |
| 338 | // Read/write the region specified. |
| 339 | virtual void do_region(u_char* start, size_t size) = 0; |
| 340 | |
| 341 | // Check/write the tag. If reading, then compare the tag against |
| 342 | // the passed in value and fail is they don't match. This allows |
| 343 | // for verification that sections of the serialized data are of the |
| 344 | // correct length. |
| 345 | virtual void do_tag(int tag) = 0; |
| 346 | |
| 347 | // Read/write the oop |
| 348 | virtual void do_oop(oop* o) = 0; |
| 349 | |
| 350 | bool writing() { |
| 351 | return !reading(); |
| 352 | } |
| 353 | }; |
| 354 | |
| 355 | class SymbolClosure : public StackObj { |
| 356 | public: |
| 357 | virtual void do_symbol(Symbol**) = 0; |
| 358 | |
| 359 | // Clear LSB in symbol address; it can be set by CPSlot. |
| 360 | static Symbol* load_symbol(Symbol** p) { |
| 361 | return (Symbol*)(intptr_t(*p) & ~1); |
| 362 | } |
| 363 | |
| 364 | // Store symbol, adjusting new pointer if the original pointer was adjusted |
| 365 | // (symbol references in constant pool slots have their LSB set to 1). |
| 366 | static void store_symbol(Symbol** p, Symbol* sym) { |
| 367 | *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1)); |
| 368 | } |
| 369 | }; |
| 370 | |
// Dispatches to the non-virtual functions if OopClosureType has
// a concrete implementation, otherwise a virtual call is taken.
// All members are static templates; definitions live in iterator.inline.hpp.
class Devirtualizer {
 public:
  // Like do_oop, but skips the ASSERT-only field verification.
  template <typename OopClosureType, typename T> static void do_oop_no_verify(OopClosureType* closure, T* p);
  template <typename OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <typename OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <typename OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <typename OopClosureType> static bool do_metadata(OopClosureType* closure);
};
| 381 | |
// Entry points for iterating an object's oop fields with a (possibly
// devirtualized) closure; definitions live in iterator.inline.hpp.
class OopIteratorClosureDispatch {
 public:
  template <typename OopClosureType> static void oop_oop_iterate(OopClosureType* cl, oop obj, Klass* klass);
  // Restricted variant: only fields within mr are visited.
  template <typename OopClosureType> static void oop_oop_iterate(OopClosureType* cl, oop obj, Klass* klass, MemRegion mr);
  template <typename OopClosureType> static void oop_oop_iterate_backwards(OopClosureType* cl, oop obj, Klass* klass);
};
| 388 | |
| 389 | #endif // SHARE_MEMORY_ITERATOR_HPP |
| 390 | |