/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_REFERENCEPROCESSOR_HPP
#define SHARE_GC_SHARED_REFERENCEPROCESSOR_HPP

#include "gc/shared/referenceDiscoverer.hpp"
#include "gc/shared/referencePolicy.hpp"
#include "gc/shared/referenceProcessorStats.hpp"
#include "memory/referenceType.hpp"
#include "oops/instanceRefKlass.hpp"

class AbstractRefProcTaskExecutor;
class GCTimer;
class ReferencePolicy;
class ReferenceProcessorPhaseTimes;

// List of discovered references.
class DiscoveredList {
public:
  DiscoveredList() : _oop_head(NULL), _compressed_head(0), _len(0) { }
  inline oop head() const;
  HeapWord* adr_head() {
    return UseCompressedOops ? (HeapWord*)&_compressed_head :
                               (HeapWord*)&_oop_head;
  }
  inline void set_head(oop o);
  inline bool is_empty() const;
  size_t length()               { return _len; }
  void   set_length(size_t len) { _len = len;  }
  void   inc_length(size_t inc) { _len += inc; assert(_len > 0, "Error"); }
  void   dec_length(size_t dec) { _len -= dec; }

  inline void clear();
private:
  // Set value depending on UseCompressedOops. This could be a template class
  // but then we have to fix all the instantiations and declarations that use this class.
  oop       _oop_head;
  narrowOop _compressed_head;
  size_t    _len;
};
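
// Illustrative sketch only (the actual inline definitions of head() and set_head()
// live in the corresponding .inline.hpp file, not here): the accessors are expected
// to dispatch on UseCompressedOops in the same way adr_head() above does, e.g.
//
//   oop DiscoveredList::head() const {
//     return UseCompressedOops ? CompressedOops::decode(_compressed_head)
//                              : _oop_head;
//   }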

// Iterator for the list of discovered references.
class DiscoveredListIterator {
private:
  DiscoveredList&    _refs_list;
  HeapWord*          _prev_discovered_addr;
  oop                _prev_discovered;
  oop                _current_discovered;
  HeapWord*          _current_discovered_addr;
  oop                _next_discovered;

  HeapWord*          _referent_addr;
  oop                _referent;

  OopClosure*        _keep_alive;
  BoolObjectClosure* _is_alive;

  DEBUG_ONLY(
  oop                _first_seen; // cyclic linked list check
  )

  size_t             _processed;
  size_t             _removed;

public:
  inline DiscoveredListIterator(DiscoveredList&    refs_list,
                                OopClosure*        keep_alive,
                                BoolObjectClosure* is_alive);

  // End Of List.
  inline bool has_next() const { return _current_discovered != NULL; }

  // Get oop to the Reference object.
  inline oop obj() const { return _current_discovered; }

  // Get oop to the referent object.
  inline oop referent() const { return _referent; }

  // Returns true if referent is alive.
  inline bool is_referent_alive() const {
    return _is_alive->do_object_b(_referent);
  }

  // Loads data for the current reference.
  // The "allow_null_referent" argument tells us to allow for the possibility
  // of a NULL referent in the discovered Reference object. This typically
  // happens in the case of concurrent collectors that may have done the
  // discovery concurrently, or interleaved, with mutator execution.
  void load_ptrs(DEBUG_ONLY(bool allow_null_referent));

  // Move to the next discovered reference.
  inline void next() {
    _prev_discovered_addr = _current_discovered_addr;
    _prev_discovered = _current_discovered;
    move_to_next();
  }

  // Remove the current reference from the list
  void remove();

  // Make the referent alive.
  inline void make_referent_alive() {
    if (UseCompressedOops) {
      _keep_alive->do_oop((narrowOop*)_referent_addr);
    } else {
      _keep_alive->do_oop((oop*)_referent_addr);
    }
  }

  // Do enqueuing work, i.e. notifying the GC about the changed discovered pointers.
  void enqueue();

  // Move enqueued references to the reference pending list.
  void complete_enqueue();

  // NULL out referent pointer.
  void clear_referent();

  // Statistics
  inline size_t processed() const { return _processed; }
  inline size_t removed() const   { return _removed; }

  inline void move_to_next() {
    if (oopDesc::equals_raw(_current_discovered, _next_discovered)) {
      // End of the list.
      _current_discovered = NULL;
    } else {
      _current_discovered = _next_discovered;
    }
    assert(!oopDesc::equals_raw(_current_discovered, _first_seen), "cyclic ref_list found");
    _processed++;
  }
};

// The ReferenceProcessor class encapsulates the per-"collector" processing
// of java.lang.Reference objects for GC. The interface is useful for supporting
// a generational abstraction, in particular when there are multiple
// generations that are being independently collected -- possibly
// concurrently and/or incrementally.
// The ReferenceProcessor class abstracts away from a generational setting
// by using a closure that determines whether a given reference or referent is
// subject to this ReferenceProcessor's discovery, thus allowing its use in a
// straightforward manner in a general, non-generational, non-contiguous generation
// (or heap) setting.
class ReferenceProcessor : public ReferenceDiscoverer {
  friend class RefProcPhase1Task;
  friend class RefProcPhase2Task;
  friend class RefProcPhase3Task;
  friend class RefProcPhase4Task;
public:
  // Names of sub-phases of reference processing. Indicates the type of the reference
  // processed and the associated phase number at the end.
  enum RefProcSubPhases {
    SoftRefSubPhase1,
    SoftRefSubPhase2,
    WeakRefSubPhase2,
    FinalRefSubPhase2,
    FinalRefSubPhase3,
    PhantomRefSubPhase4,
    RefSubPhaseMax
  };

  // Main phases of reference processing.
  enum RefProcPhases {
    RefPhase1,
    RefPhase2,
    RefPhase3,
    RefPhase4,
    RefPhaseMax
  };

private:
  size_t total_count(DiscoveredList lists[]) const;
  void verify_total_count_zero(DiscoveredList lists[], const char* type) NOT_DEBUG_RETURN;

  // The SoftReference master timestamp clock
  static jlong _soft_ref_timestamp_clock;

  BoolObjectClosure* _is_subject_to_discovery; // determines whether a given oop is subject
                                               // to this ReferenceProcessor's discovery
                                               // (and further processing).

  bool _discovering_refs;        // true when discovery enabled
  bool _discovery_is_atomic;     // if discovery is atomic wrt
                                 // other collectors in configuration
  bool _discovery_is_mt;         // true if reference discovery is MT.

  bool _enqueuing_is_done;       // true if all weak references enqueued
  bool _processing_is_mt;        // true during phases when
                                 // reference processing is MT.
  uint _next_id;                 // round-robin mod _num_queues counter in
                                 // support of work distribution

  bool _adjust_no_of_processing_threads; // allow dynamic adjustment of processing threads
  // For collectors that do not keep GC liveness information
  // in the object header, this field holds a closure that
  // helps the reference processor determine the reachability
  // of an oop. It is currently initialized to NULL for all
  // collectors except for CMS and G1.
  BoolObjectClosure* _is_alive_non_header;

  // Soft ref clearing policies
  // . the default policy
  static ReferencePolicy* _default_soft_ref_policy;
  // . the "clear all" policy
  static ReferencePolicy* _always_clear_soft_ref_policy;
  // . the current policy below is either one of the above
  ReferencePolicy*        _current_soft_ref_policy;

  // The discovered ref lists themselves

  // The active MT'ness degree of the queues below
  uint _num_queues;
  // The maximum MT'ness degree of the queues below
  uint _max_num_queues;

  // Master array of discovered oops
  DiscoveredList* _discovered_refs;

  // Arrays of lists of oops, one per thread (pointers into master array above)
  DiscoveredList* _discoveredSoftRefs;
  DiscoveredList* _discoveredWeakRefs;
  DiscoveredList* _discoveredFinalRefs;
  DiscoveredList* _discoveredPhantomRefs;

  // Phase 1: Re-evaluate soft ref policy.
  void process_soft_ref_reconsider(BoolObjectClosure* is_alive,
                                   OopClosure* keep_alive,
                                   VoidClosure* complete_gc,
                                   AbstractRefProcTaskExecutor* task_executor,
                                   ReferenceProcessorPhaseTimes* phase_times);

  // Phase 2: Drop Soft/Weak/Final references with a NULL or live referent, and clear
  // and enqueue non-Final references.
  void process_soft_weak_final_refs(BoolObjectClosure* is_alive,
                                    OopClosure* keep_alive,
                                    VoidClosure* complete_gc,
                                    AbstractRefProcTaskExecutor* task_executor,
                                    ReferenceProcessorPhaseTimes* phase_times);

  // Phase 3: Keep alive followers of Final references, and enqueue.
  void process_final_keep_alive(OopClosure* keep_alive,
                                VoidClosure* complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                ReferenceProcessorPhaseTimes* phase_times);

  // Phase 4: Drop and keep alive live Phantom references, or clear and enqueue if dead.
  void process_phantom_refs(BoolObjectClosure* is_alive,
                            OopClosure* keep_alive,
                            VoidClosure* complete_gc,
                            AbstractRefProcTaskExecutor* task_executor,
                            ReferenceProcessorPhaseTimes* phase_times);

  // Work methods used by the process_* methods. All methods return the number of
  // removed elements.

  // (SoftReferences only) Traverse the list and remove any SoftReferences whose
  // referents are not alive, but that should be kept alive for policy reasons.
  // Keep alive the transitive closure of all such referents.
  size_t process_soft_ref_reconsider_work(DiscoveredList& refs_list,
                                          ReferencePolicy* policy,
                                          BoolObjectClosure* is_alive,
                                          OopClosure* keep_alive,
                                          VoidClosure* complete_gc);

  // Traverse the list and remove any Refs whose referents are alive,
  // or NULL if discovery is not atomic. Enqueue and clear the reference for
  // others if do_enqueue_and_clear is set.
  size_t process_soft_weak_final_refs_work(DiscoveredList& refs_list,
                                           BoolObjectClosure* is_alive,
                                           OopClosure* keep_alive,
                                           bool do_enqueue_and_clear);

  // Keep alive followers of referents for FinalReferences. Must only be called for
  // those.
  size_t process_final_keep_alive_work(DiscoveredList& refs_list,
                                       OopClosure* keep_alive,
                                       VoidClosure* complete_gc);

  size_t process_phantom_refs_work(DiscoveredList& refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure* keep_alive,
                                   VoidClosure* complete_gc);

public:
  static int number_of_subclasses_of_ref() { return (REF_PHANTOM - REF_OTHER); }

  uint num_queues() const     { return _num_queues; }
  uint max_num_queues() const { return _max_num_queues; }
  void set_active_mt_degree(uint v);

  ReferencePolicy* setup_policy(bool always_clear) {
    _current_soft_ref_policy = always_clear ?
      _always_clear_soft_ref_policy : _default_soft_ref_policy;
    _current_soft_ref_policy->setup();   // snapshot the policy threshold
    return _current_soft_ref_policy;
  }
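
  // Usage sketch (hypothetical caller code, not part of this header): a collector
  // typically selects the policy once per collection, before reference discovery
  // and processing, e.g.
  //
  //   bool clear_all_soft_refs = ...;   // decided by the collector's heuristics
  //   rp->setup_policy(clear_all_soft_refs);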

  // "Preclean" all the discovered reference lists by removing references that
  // are active (e.g. due to the mutator calling enqueue()) or with NULL or
  // strongly reachable referents.
  // The first argument is a predicate on an oop that indicates
  // its (strong) reachability and the fourth is a closure that
  // may be used to incrementalize or abort the precleaning process.
  // The caller is responsible for taking care of potential
  // interference with concurrent operations on these lists
  // (or predicates involved) by other threads.
  void preclean_discovered_references(BoolObjectClosure* is_alive,
                                      OopClosure* keep_alive,
                                      VoidClosure* complete_gc,
                                      YieldClosure* yield,
                                      GCTimer* gc_timer);

private:
  // Returns the name of the discovered reference list
  // occupying the i / _num_queues slot.
  const char* list_name(uint i);

  // "Preclean" the given discovered reference list by removing references with
  // the attributes mentioned in preclean_discovered_references().
  // Supports both normal and fine grain yielding.
  // Returns whether the operation should be aborted.
  bool preclean_discovered_reflist(DiscoveredList& refs_list,
                                   BoolObjectClosure* is_alive,
                                   OopClosure* keep_alive,
                                   VoidClosure* complete_gc,
                                   YieldClosure* yield);

  // round-robin mod _num_queues (note: _not_ mod _max_num_queues)
  uint next_id() {
    uint id = _next_id;
    assert(!_discovery_is_mt, "Round robin should only be used in serial discovery");
    if (++_next_id == _num_queues) {
      _next_id = 0;
    }
    assert(_next_id < _num_queues, "_next_id %u _num_queues %u _max_num_queues %u", _next_id, _num_queues, _max_num_queues);
    return id;
  }
  DiscoveredList* get_discovered_list(ReferenceType rt);
  inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
                                        HeapWord* discovered_addr);

  void clear_discovered_references(DiscoveredList& refs_list);

  void log_reflist(const char* prefix, DiscoveredList list[], uint num_active_queues);
  void log_reflist_counts(DiscoveredList ref_lists[], uint num_active_queues) PRODUCT_RETURN;

  // Balances reference queues.
  void balance_queues(DiscoveredList refs_lists[]);
  bool need_balance_queues(DiscoveredList refs_lists[]);

  // If there is need to balance the given queue, do it.
  void maybe_balance_queues(DiscoveredList refs_lists[]);

  // Update (advance) the soft ref master clock field.
  void update_soft_ref_master_clock();

  bool is_subject_to_discovery(oop const obj) const;

  bool is_mt_processing_set_up(AbstractRefProcTaskExecutor* task_executor) const;

public:
  // Default parameters give you a vanilla reference processor.
  ReferenceProcessor(BoolObjectClosure* is_subject_to_discovery,
                     bool mt_processing = false, uint mt_processing_degree = 1,
                     bool mt_discovery = false, uint mt_discovery_degree = 1,
                     bool atomic_discovery = true,
                     BoolObjectClosure* is_alive_non_header = NULL,
                     bool adjust_no_of_processing_threads = false);
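
  // Construction sketch (hypothetical caller code; the closure object and flag
  // values are illustrative, not prescribed by this header). A collector wires up
  // a subject-to-discovery closure and chooses its MT degrees, e.g.
  //
  //   static SpanSubjectToDiscoveryClosure _span_based_discoverer;
  //   _span_based_discoverer.set_span(heap_region);   // heap_region: hypothetical MemRegion
  //   ReferenceProcessor* rp =
  //     new ReferenceProcessor(&_span_based_discoverer,
  //                            ParallelGCThreads > 1,    // mt_processing
  //                            ParallelGCThreads,        // mt_processing_degree
  //                            ParallelGCThreads > 1,    // mt_discovery
  //                            ParallelGCThreads,        // mt_discovery_degree
  //                            true,                     // atomic_discovery
  //                            NULL);                    // is_alive_non_header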

  // RefDiscoveryPolicy values
  enum DiscoveryPolicy {
    ReferenceBasedDiscovery = 0,
    ReferentBasedDiscovery  = 1,
    DiscoveryPolicyMin      = ReferenceBasedDiscovery,
    DiscoveryPolicyMax      = ReferentBasedDiscovery
  };

  static void init_statics();

  // get and set "is_alive_non_header" field
  BoolObjectClosure* is_alive_non_header() {
    return _is_alive_non_header;
  }
  void set_is_alive_non_header(BoolObjectClosure* is_alive_non_header) {
    _is_alive_non_header = is_alive_non_header;
  }

  BoolObjectClosure* is_subject_to_discovery_closure() const { return _is_subject_to_discovery; }
  void set_is_subject_to_discovery_closure(BoolObjectClosure* cl) { _is_subject_to_discovery = cl; }

  // start and stop weak ref discovery
  void enable_discovery(bool check_no_refs = true);
  void disable_discovery() { _discovering_refs = false; }
  bool discovery_enabled()  { return _discovering_refs; }

  // whether discovery is atomic wrt other collectors
  bool discovery_is_atomic() const { return _discovery_is_atomic; }
  void set_atomic_discovery(bool atomic) { _discovery_is_atomic = atomic; }

  // whether discovery is done by multiple threads simultaneously
  bool discovery_is_mt() const { return _discovery_is_mt; }
  void set_mt_discovery(bool mt) { _discovery_is_mt = mt; }

  // Whether we are in a phase when _processing_ is MT.
  bool processing_is_mt() const { return _processing_is_mt; }
  void set_mt_processing(bool mt) { _processing_is_mt = mt; }

  // whether all enqueueing of weak references is complete
  bool enqueuing_is_done() { return _enqueuing_is_done; }
  void set_enqueuing_is_done(bool v) { _enqueuing_is_done = v; }

  // iterate over oops
  void weak_oops_do(OopClosure* f);       // weak roots

  void verify_list(DiscoveredList& ref_list);

  // Discover a Reference object, using appropriate discovery criteria
  virtual bool discover_reference(oop obj, ReferenceType rt);

  // Has discovered references that need handling
  bool has_discovered_references();

  // Process references found during GC (called by the garbage collector)
  ReferenceProcessorStats
  process_discovered_references(BoolObjectClosure* is_alive,
                                OopClosure* keep_alive,
                                VoidClosure* complete_gc,
                                AbstractRefProcTaskExecutor* task_executor,
                                ReferenceProcessorPhaseTimes* phase_times);
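
  // Usage sketch (hypothetical caller code): the collector runs this once per
  // collection, after liveness of strongly reachable objects has been determined,
  // passing its own is_alive/keep_alive/complete_gc closures, e.g.
  //
  //   ReferenceProcessorStats stats =
  //     rp->process_discovered_references(&is_alive, &keep_alive, &complete_gc,
  //                                       executor,       // may be NULL for serial processing
  //                                       &phase_times);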

  // If a discovery is in process that is being superseded, abandon it: all
  // the discovered lists will be empty, and all the objects on them will
  // have NULL discovered fields. Must be called only at a safepoint.
  void abandon_partial_discovery();

  size_t total_reference_count(ReferenceType rt) const;

  // debugging
  void verify_no_references_recorded() PRODUCT_RETURN;
  void verify_referent(oop obj) PRODUCT_RETURN;

  bool adjust_no_of_processing_threads() const { return _adjust_no_of_processing_threads; }
};

// A subject-to-discovery closure that uses a single memory span to determine the area that
// is subject to discovery. Useful for collectors which have contiguous generations.
class SpanSubjectToDiscoveryClosure : public BoolObjectClosure {
  MemRegion _span;

public:
  SpanSubjectToDiscoveryClosure() : BoolObjectClosure(), _span() { }
  SpanSubjectToDiscoveryClosure(MemRegion span) : BoolObjectClosure(), _span(span) { }

  MemRegion span() const { return _span; }

  void set_span(MemRegion mr) {
    _span = mr;
  }

  virtual bool do_object_b(oop obj) {
    return _span.contains(obj);
  }
};

// A utility class to disable reference discovery in
// the scope which contains it, for the given ReferenceProcessor.
class NoRefDiscovery: StackObj {
private:
  ReferenceProcessor* _rp;
  bool _was_discovering_refs;
public:
  NoRefDiscovery(ReferenceProcessor* rp) : _rp(rp) {
    _was_discovering_refs = _rp->discovery_enabled();
    if (_was_discovering_refs) {
      _rp->disable_discovery();
    }
  }

  ~NoRefDiscovery() {
    if (_was_discovering_refs) {
      _rp->enable_discovery(false /*check_no_refs*/);
    }
  }
};
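
// Usage sketch (hypothetical caller code): wrap an operation during which no
// references should be discovered; the previous state is restored on scope exit, e.g.
//
//   {
//     NoRefDiscovery no_discovery(ref_processor);
//     // ... work that must not add to the discovered lists ...
//   }  // prior discovery state restored here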

// A utility class to temporarily mutate the subject discovery closure of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSubjectToDiscoveryMutator : StackObj {
  ReferenceProcessor* _rp;
  BoolObjectClosure* _saved_cl;

public:
  ReferenceProcessorSubjectToDiscoveryMutator(ReferenceProcessor* rp, BoolObjectClosure* cl):
    _rp(rp) {
    _saved_cl = _rp->is_subject_to_discovery_closure();
    _rp->set_is_subject_to_discovery_closure(cl);
  }

  ~ReferenceProcessorSubjectToDiscoveryMutator() {
    _rp->set_is_subject_to_discovery_closure(_saved_cl);
  }
};

// A utility class to temporarily mutate the span of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorSpanMutator : StackObj {
  ReferenceProcessor* _rp;
  SpanSubjectToDiscoveryClosure _discoverer;
  BoolObjectClosure* _old_discoverer;

public:
  ReferenceProcessorSpanMutator(ReferenceProcessor* rp,
                                MemRegion span):
    _rp(rp),
    _discoverer(span),
    _old_discoverer(rp->is_subject_to_discovery_closure()) {

    rp->set_is_subject_to_discovery_closure(&_discoverer);
  }

  ~ReferenceProcessorSpanMutator() {
    _rp->set_is_subject_to_discovery_closure(_old_discoverer);
  }
};
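
// Usage sketch (hypothetical caller code; the region name is illustrative):
// temporarily narrow discovery to a sub-region, e.g. while collecting only the
// young generation:
//
//   {
//     ReferenceProcessorSpanMutator span_scope(ref_processor, young_gen_region);
//     // ... discovery now limited to young_gen_region ...
//   }  // previous subject-to-discovery closure restored here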

// A utility class to temporarily change the MT'ness of
// reference discovery for the given ReferenceProcessor
// in the scope that contains it.
class ReferenceProcessorMTDiscoveryMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

public:
  ReferenceProcessorMTDiscoveryMutator(ReferenceProcessor* rp,
                                       bool mt):
    _rp(rp) {
    _saved_mt = _rp->discovery_is_mt();
    _rp->set_mt_discovery(mt);
  }

  ~ReferenceProcessorMTDiscoveryMutator() {
    _rp->set_mt_discovery(_saved_mt);
  }
};

// A utility class to temporarily change the disposition
// of the "is_alive_non_header" closure field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorIsAliveMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  BoolObjectClosure*  _saved_cl;

public:
  ReferenceProcessorIsAliveMutator(ReferenceProcessor* rp,
                                   BoolObjectClosure* cl):
    _rp(rp) {
    _saved_cl = _rp->is_alive_non_header();
    _rp->set_is_alive_non_header(cl);
  }

  ~ReferenceProcessorIsAliveMutator() {
    _rp->set_is_alive_non_header(_saved_cl);
  }
};

// A utility class to temporarily change the disposition
// of the "discovery_is_atomic" field of the
// given ReferenceProcessor in the scope that contains it.
class ReferenceProcessorAtomicMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_atomic_discovery;

public:
  ReferenceProcessorAtomicMutator(ReferenceProcessor* rp,
                                  bool atomic):
    _rp(rp) {
    _saved_atomic_discovery = _rp->discovery_is_atomic();
    _rp->set_atomic_discovery(atomic);
  }

  ~ReferenceProcessorAtomicMutator() {
    _rp->set_atomic_discovery(_saved_atomic_discovery);
  }
};


// A utility class to temporarily change the MT processing
// disposition of the given ReferenceProcessor instance
// in the scope that contains it.
class ReferenceProcessorMTProcMutator: StackObj {
private:
  ReferenceProcessor* _rp;
  bool                _saved_mt;

public:
  ReferenceProcessorMTProcMutator(ReferenceProcessor* rp,
                                  bool mt):
    _rp(rp) {
    _saved_mt = _rp->processing_is_mt();
    _rp->set_mt_processing(mt);
  }

  ~ReferenceProcessorMTProcMutator() {
    _rp->set_mt_processing(_saved_mt);
  }
};

// This class is an interface used to implement task execution for
// reference processing.
class AbstractRefProcTaskExecutor {
public:

  // Abstract tasks to execute.
  class ProcessTask;

  // Executes a task using worker threads.
  virtual void execute(ProcessTask& task, uint ergo_workers) = 0;

  // Switch to single threaded mode.
  virtual void set_single_threaded_mode() { };
};

// Abstract reference processing task to execute.
class AbstractRefProcTaskExecutor::ProcessTask {
protected:
  ReferenceProcessor&           _ref_processor;
  // Indicates whether the phase could generate work that should be balanced across
  // threads after execution.
  bool                          _marks_oops_alive;
  ReferenceProcessorPhaseTimes* _phase_times;

  ProcessTask(ReferenceProcessor& ref_processor,
              bool marks_oops_alive,
              ReferenceProcessorPhaseTimes* phase_times)
    : _ref_processor(ref_processor),
      _marks_oops_alive(marks_oops_alive),
      _phase_times(phase_times)
  { }

public:
  virtual void work(uint worker_id,
                    BoolObjectClosure& is_alive,
                    OopClosure& keep_alive,
                    VoidClosure& complete_gc) = 0;

  bool marks_oops_alive() const { return _marks_oops_alive; }
};
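
// Sketch of how a collector might implement the executor interface (hypothetical
// code; the closure members are illustrative, not part of this header). A serial
// executor simply runs the task on the current thread as worker 0, passing the
// collector's own closures:
//
//   class MySerialRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
//     BoolObjectClosure* _is_alive;     // set up by the collector
//     OopClosure*        _keep_alive;
//     VoidClosure*       _complete_gc;
//   public:
//     virtual void execute(ProcessTask& task, uint ergo_workers) {
//       assert(ergo_workers == 1, "serial executor uses a single worker");
//       task.work(0, *_is_alive, *_keep_alive, *_complete_gc);
//     }
//   };
//
// An MT executor would instead wrap the ProcessTask in a gang task handed to its
// worker threads, passing each worker's id and per-worker closures to work().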

// Temporarily change the number of workers based on the given reference count.
// This ergonomically decided worker count will be used to activate worker threads.
class RefProcMTDegreeAdjuster : public StackObj {
  typedef ReferenceProcessor::RefProcPhases RefProcPhases;

  ReferenceProcessor* _rp;
  bool                _saved_mt_processing;
  uint                _saved_num_queues;

  // Calculate the ergonomic number of processing threads from the total number of references.
  uint ergo_proc_thread_count(size_t ref_count,
                              uint max_threads,
                              RefProcPhases phase) const;

  bool use_max_threads(RefProcPhases phase) const;

public:
  RefProcMTDegreeAdjuster(ReferenceProcessor* rp,
                          RefProcPhases phase,
                          size_t ref_count);
  ~RefProcMTDegreeAdjuster();
};
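
// Usage sketch (hypothetical code inside a processing phase; the reference-count
// variable is illustrative): the adjuster is scoped to a single phase so the saved
// MT settings are restored when the phase ends, e.g.
//
//   {
//     RefProcMTDegreeAdjuster a(this, ReferenceProcessor::RefPhase2, num_refs_for_phase2);
//     // ... execute phase 2 with the ergonomically chosen number of workers ...
//   }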

#endif // SHARE_GC_SHARED_REFERENCEPROCESSOR_HPP