/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP
#define SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP

#include "gc/shared/cardTable.hpp"
#include "gc/shared/ptrQueue.hpp"
#include "memory/allocation.hpp"

class G1DirtyCardQueueSet;
class G1FreeIdSet;
class Thread;
class Monitor;

// A closure class for processing card table entries. Note that we don't
// require these closure objects to be stack-allocated.
class G1CardTableEntryClosure: public CHeapObj<mtGC> {
public:
  typedef CardTable::CardValue CardValue;

  // Process the card whose card table entry is "card_ptr". Returning
  // "false" terminates the iteration early. (An example implementation
  // is sketched after this class.)
  virtual bool do_card_ptr(CardValue* card_ptr, uint worker_i) = 0;
};
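
// The following is an illustrative sketch only, not part of this header:
// a minimal concrete closure showing how do_card_ptr might be implemented.
// "CountCardsClosure" is a hypothetical name introduced for the example.
//
//   class CountCardsClosure : public G1CardTableEntryClosure {
//     size_t _count;
//   public:
//     CountCardsClosure() : _count(0) {}
//     virtual bool do_card_ptr(CardValue* card_ptr, uint worker_i) {
//       _count++;       // examine or refine the card here
//       return true;    // returning false would stop the iteration early
//     }
//     size_t count() const { return _count; }
//   };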

// A PtrQueue whose elements are pointers to card table entries (dirty
// cards). (A usage sketch follows this class.)
class G1DirtyCardQueue: public PtrQueue {
protected:
  virtual void handle_completed_buffer();

public:
  G1DirtyCardQueue(G1DirtyCardQueueSet* qset);

  // Flush before destroying; queue may be used to capture pending work while
  // doing something else, with auto-flush on completion.
  ~G1DirtyCardQueue();

  // Process queue entries and release resources.
  void flush() { flush_impl(); }

  inline G1DirtyCardQueueSet* dirty_card_qset() const;

  // Compiler support.
  static ByteSize byte_offset_of_index() {
    return PtrQueue::byte_offset_of_index<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_index;

  static ByteSize byte_offset_of_buf() {
    return PtrQueue::byte_offset_of_buf<G1DirtyCardQueue>();
  }
  using PtrQueue::byte_width_of_buf;

};
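
// The following is an illustrative sketch only, not part of this header:
// using a local queue to capture pending dirty cards. "qset" and "card_ptr"
// are assumed to be supplied by the surrounding code; enqueue() is inherited
// from PtrQueue.
//
//   {
//     G1DirtyCardQueue dcq(qset);   // associate with a G1DirtyCardQueueSet
//     dcq.enqueue(card_ptr);        // record a dirty card table entry
//     // ... further work that may enqueue more cards ...
//     dcq.flush();                  // process entries and release resources
//   }                               // destructor flushes any remainder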

class G1DirtyCardQueueSet: public PtrQueueSet {
  // Apply the closure to the elements of "node" from its index to
  // buffer_size. If all closure applications return true, then
  // returns true. Stops processing after the first closure
  // application that returns false, and returns false from this
  // function. If "consume" is true, the node's index is updated to
  // exclude the processed elements, e.g. up to the element for which
  // the closure returned false.
  bool apply_closure_to_buffer(G1CardTableEntryClosure* cl,
                               BufferNode* node,
                               bool consume,
                               uint worker_i = 0);

  // If there are more than stop_at completed buffers, pop one, apply
  // the specified closure to its active elements, and return true.
  // Otherwise return false.
  //
  // A completely processed buffer is freed. However, if a closure
  // invocation returns false, processing is stopped and the partially
  // processed buffer (with its index updated to exclude the processed
  // elements, i.e. up to the element for which the closure returned
  // false) is returned to the completed buffer set.
  //
  // If during_pause is true, stop_at must be zero, and the closure
  // must never return false.
  bool apply_closure_to_completed_buffer(G1CardTableEntryClosure* cl,
                                         uint worker_i,
                                         size_t stop_at,
                                         bool during_pause);

  bool mut_process_buffer(BufferNode* node);

  // If the queue contains more buffers than configured here, the
  // mutator must start doing some of the concurrent refinement work.
  size_t _max_completed_buffers;
  size_t _completed_buffers_padding;
  static const size_t MaxCompletedBuffersUnlimited = ~size_t(0);

  G1FreeIdSet* _free_ids;

  // The number of completed buffers processed by the mutator and the rs
  // thread, respectively.
  jint _processed_buffers_mut;
  jint _processed_buffers_rs_thread;

  // Current buffer node used for parallel iteration.
  BufferNode* volatile _cur_par_buffer_node;

public:
  G1DirtyCardQueueSet(bool notify_when_complete = true);
  ~G1DirtyCardQueueSet();

  void initialize(Monitor* cbl_mon,
                  BufferNode::Allocator* allocator,
                  bool init_free_ids = false);

  // The number of parallel ids that can be claimed to allow collector or
  // mutator threads to do card-processing work.
  static uint num_par_ids();

  static void handle_zero_index_for_thread(Thread* t);

  // Either process the entire buffer and return true, or enqueue the
  // buffer and return false. If the buffer is completely processed,
  // it can be reused in place.
  bool process_or_enqueue_completed_buffer(BufferNode* node);

  // Apply G1RefineCardConcurrentlyClosure to completed buffers until there
  // are stop_at completed buffers remaining. (See the usage sketch after
  // this class.)
  bool refine_completed_buffer_concurrently(uint worker_i, size_t stop_at);

  // Apply the given closure to all completed buffers. The given closure's
  // do_card_ptr must never return false. Must only be called during GC.
  bool apply_closure_during_gc(G1CardTableEntryClosure* cl, uint worker_i);

  void reset_for_par_iteration() { _cur_par_buffer_node = completed_buffers_head(); }
  // Applies the given closure to all completed buffers, non-consumptively.
  // Can be used in parallel, all callers using the iteration state initialized
  // by reset_for_par_iteration. (See the usage sketch after this class.)
  void par_apply_closure_to_all_completed_buffers(G1CardTableEntryClosure* cl);

  // If a full collection is happening, reset partial logs, and ignore
  // completed ones: the full collection will make them all irrelevant.
  void abandon_logs();

  // If any threads have partial logs, add them to the global list of logs.
  void concatenate_logs();

  void set_max_completed_buffers(size_t m) {
    _max_completed_buffers = m;
  }
  size_t max_completed_buffers() const {
    return _max_completed_buffers;
  }

  void set_completed_buffers_padding(size_t padding) {
    _completed_buffers_padding = padding;
  }
  size_t completed_buffers_padding() const {
    return _completed_buffers_padding;
  }

  jint processed_buffers_mut() {
    return _processed_buffers_mut;
  }
  jint processed_buffers_rs_thread() {
    return _processed_buffers_rs_thread;
  }

};
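
// The following are illustrative sketches only, not part of this header.
// "dcqs", "worker_id", "stop_at" and "cl" are hypothetical names assumed to
// be supplied by the caller (e.g. a concurrent refinement thread or a GC
// worker).
//
// Concurrent refinement: pop and refine completed buffers until at most
// stop_at of them remain:
//
//   while (dcqs.refine_completed_buffer_concurrently(worker_id, stop_at)) {
//     // each successful call popped and processed one completed buffer
//   }
//
// Non-consumptive parallel iteration during GC: one thread initializes the
// iteration state, then workers apply the closure in parallel:
//
//   dcqs.reset_for_par_iteration();                       // single-threaded
//   dcqs.par_apply_closure_to_all_completed_buffers(cl);  // in each worker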

inline G1DirtyCardQueueSet* G1DirtyCardQueue::dirty_card_qset() const {
  return static_cast<G1DirtyCardQueueSet*>(qset());
}

#endif // SHARE_GC_G1_G1DIRTYCARDQUEUE_HPP