1 | /* |
2 | * Copyright (c) 2011, 2019, Oracle and/or its affiliates. All rights reserved. |
3 | * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
4 | * |
5 | * This code is free software; you can redistribute it and/or modify it |
6 | * under the terms of the GNU General Public License version 2 only, as |
7 | * published by the Free Software Foundation. |
8 | * |
9 | * This code is distributed in the hope that it will be useful, but WITHOUT |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
12 | * version 2 for more details (a copy is included in the LICENSE file that |
13 | * accompanied this code). |
14 | * |
15 | * You should have received a copy of the GNU General Public License version |
16 | * 2 along with this work; if not, write to the Free Software Foundation, |
17 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
18 | * |
19 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
20 | * or visit www.oracle.com if you need additional information or have any |
21 | * questions. |
22 | * |
23 | */ |
24 | |
25 | #ifndef SHARE_GC_G1_G1ALLOCREGION_HPP |
26 | #define SHARE_GC_G1_G1ALLOCREGION_HPP |
27 | |
#include "gc/g1/g1EvacStats.hpp"
#include "gc/g1/g1HeapRegionAttr.hpp"
#include "gc/g1/heapRegion.hpp"
31 | |
32 | class G1CollectedHeap; |
33 | |
34 | // A class that holds a region that is active in satisfying allocation |
35 | // requests, potentially issued in parallel. When the active region is |
36 | // full it will be retired and replaced with a new one. The |
37 | // implementation assumes that fast-path allocations will be lock-free |
38 | // and a lock will need to be taken when the active region needs to be |
39 | // replaced. |
40 | |
41 | class G1AllocRegion { |
42 | |
43 | private: |
44 | // The active allocating region we are currently allocating out |
45 | // of. The invariant is that if this object is initialized (i.e., |
46 | // init() has been called and release() has not) then _alloc_region |
47 | // is either an active allocating region or the dummy region (i.e., |
48 | // it can never be NULL) and this object can be used to satisfy |
49 | // allocation requests. If this object is not initialized |
50 | // (i.e. init() has not been called or release() has been called) |
51 | // then _alloc_region is NULL and this object should not be used to |
52 | // satisfy allocation requests (it was done this way to force the |
53 | // correct use of init() and release()). |
54 | HeapRegion* volatile _alloc_region; |
55 | |
56 | // It keeps track of the distinct number of regions that are used |
57 | // for allocation in the active interval of this object, i.e., |
58 | // between a call to init() and a call to release(). The count |
59 | // mostly includes regions that are freshly allocated, as well as |
60 | // the region that is re-used using the set() method. This count can |
61 | // be used in any heuristics that might want to bound how many |
62 | // distinct regions this object can used during an active interval. |
63 | uint _count; |
64 | |
65 | // When we set up a new active region we save its used bytes in this |
66 | // field so that, when we retire it, we can calculate how much space |
67 | // we allocated in it. |
68 | size_t _used_bytes_before; |
69 | |
70 | // When true, indicates that allocate calls should do BOT updates. |
71 | const bool _bot_updates; |
72 | |
73 | // Useful for debugging and tracing. |
74 | const char* _name; |
75 | |
76 | // A dummy region (i.e., it's been allocated specially for this |
77 | // purpose and it is not part of the heap) that is full (i.e., top() |
78 | // == end()). When we don't have a valid active region we make |
79 | // _alloc_region point to this. This allows us to skip checking |
80 | // whether the _alloc_region is NULL or not. |
81 | static HeapRegion* _dummy_region; |
82 | |
83 | // After a region is allocated by alloc_new_region, this |
84 | // method is used to set it as the active alloc_region |
85 | void update_alloc_region(HeapRegion* alloc_region); |
86 | |
87 | // Allocate a new active region and use it to perform a word_size |
88 | // allocation. The force parameter will be passed on to |
89 | // G1CollectedHeap::allocate_new_alloc_region() and tells it to try |
90 | // to allocate a new region even if the max has been reached. |
91 | HeapWord* new_alloc_region_and_allocate(size_t word_size, bool force); |
92 | |
93 | protected: |
94 | // Reset the alloc region to point a the dummy region. |
95 | void reset_alloc_region(); |
96 | |
97 | // Perform a non-MT-safe allocation out of the given region. |
98 | inline HeapWord* allocate(HeapRegion* alloc_region, |
99 | size_t word_size); |
100 | |
101 | // Perform a MT-safe allocation out of the given region. |
102 | inline HeapWord* par_allocate(HeapRegion* alloc_region, |
103 | size_t word_size); |
104 | // Perform a MT-safe allocation out of the given region, with the given |
105 | // minimum and desired size. Returns the actual size allocated (between |
106 | // minimum and desired size) in actual_word_size if the allocation has been |
107 | // successful. |
108 | inline HeapWord* par_allocate(HeapRegion* alloc_region, |
109 | size_t min_word_size, |
110 | size_t desired_word_size, |
111 | size_t* actual_word_size); |
112 | |
113 | // Ensure that the region passed as a parameter has been filled up |
114 | // so that noone else can allocate out of it any more. |
115 | // Returns the number of bytes that have been wasted by filled up |
116 | // the space. |
117 | size_t fill_up_remaining_space(HeapRegion* alloc_region); |
118 | |
119 | // Retire the active allocating region. If fill_up is true then make |
120 | // sure that the region is full before we retire it so that no one |
121 | // else can allocate out of it. |
122 | // Returns the number of bytes that have been filled up during retire. |
123 | virtual size_t retire(bool fill_up); |
124 | |
125 | size_t retire_internal(HeapRegion* alloc_region, bool fill_up); |
126 | |
127 | // For convenience as subclasses use it. |
128 | static G1CollectedHeap* _g1h; |
129 | |
130 | virtual HeapRegion* allocate_new_region(size_t word_size, bool force) = 0; |
131 | virtual void retire_region(HeapRegion* alloc_region, |
132 | size_t allocated_bytes) = 0; |
133 | |
134 | G1AllocRegion(const char* name, bool bot_updates); |
135 | |
136 | public: |
137 | static void setup(G1CollectedHeap* g1h, HeapRegion* dummy_region); |
138 | |
139 | HeapRegion* get() const { |
140 | HeapRegion * hr = _alloc_region; |
141 | // Make sure that the dummy region does not escape this class. |
142 | return (hr == _dummy_region) ? NULL : hr; |
143 | } |
144 | |
145 | uint count() { return _count; } |
146 | |
147 | // The following two are the building blocks for the allocation method. |
148 | |
149 | // First-level allocation: Should be called without holding a |
150 | // lock. It will try to allocate lock-free out of the active region, |
151 | // or return NULL if it was unable to. |
152 | inline HeapWord* attempt_allocation(size_t word_size); |
153 | // Perform an allocation out of the current allocation region, with the given |
154 | // minimum and desired size. Returns the actual size allocated (between |
155 | // minimum and desired size) in actual_word_size if the allocation has been |
156 | // successful. |
157 | // Should be called without holding a lock. It will try to allocate lock-free |
158 | // out of the active region, or return NULL if it was unable to. |
159 | inline HeapWord* attempt_allocation(size_t min_word_size, |
160 | size_t desired_word_size, |
161 | size_t* actual_word_size); |
162 | |
163 | // Second-level allocation: Should be called while holding a |
164 | // lock. It will try to first allocate lock-free out of the active |
165 | // region or, if it's unable to, it will try to replace the active |
166 | // alloc region with a new one. We require that the caller takes the |
167 | // appropriate lock before calling this so that it is easier to make |
168 | // it conform to its locking protocol. |
169 | inline HeapWord* attempt_allocation_locked(size_t word_size); |
170 | // Same as attempt_allocation_locked(size_t, bool), but allowing specification |
171 | // of minimum word size of the block in min_word_size, and the maximum word |
172 | // size of the allocation in desired_word_size. The actual size of the block is |
173 | // returned in actual_word_size. |
174 | inline HeapWord* attempt_allocation_locked(size_t min_word_size, |
175 | size_t desired_word_size, |
176 | size_t* actual_word_size); |
177 | |
178 | // Should be called to allocate a new region even if the max of this |
179 | // type of regions has been reached. Should only be called if other |
180 | // allocation attempts have failed and we are not holding a valid |
181 | // active region. |
182 | inline HeapWord* attempt_allocation_force(size_t word_size); |
183 | |
184 | // Should be called before we start using this object. |
185 | virtual void init(); |
186 | |
187 | // This can be used to set the active region to a specific |
188 | // region. (Use Example: we try to retain the last old GC alloc |
189 | // region that we've used during a GC and we can use set() to |
190 | // re-instate it at the beginning of the next GC.) |
191 | void set(HeapRegion* alloc_region); |
192 | |
193 | // Should be called when we want to release the active region which |
194 | // is returned after it's been retired. |
195 | virtual HeapRegion* release(); |
196 | |
197 | void trace(const char* str, |
198 | size_t min_word_size = 0, |
199 | size_t desired_word_size = 0, |
200 | size_t actual_word_size = 0, |
201 | HeapWord* result = NULL) PRODUCT_RETURN; |
202 | }; |
203 | |
// Alloc region used for allocation by mutator (application) threads.
// In addition to the current active region it may keep one "retained"
// region to reduce waste when the active region is retired while it
// still has room for a TLAB.
class MutatorAllocRegion : public G1AllocRegion {
private:
  // Keeps track of the total waste generated during the current
  // mutator phase.
  size_t _wasted_bytes;

  // Retained allocation region. Used to lower the waste generated
  // during mutation by having two active regions if the free space
  // in a region about to be retired still could fit a TLAB.
  HeapRegion* volatile _retained_alloc_region;

  // Decide if the region should be retained, based on the free size
  // in it and the free size in the currently retained region, if any.
  bool should_retain(HeapRegion* region);
protected:
  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);
  // Overridden to implement the retained-region policy on retire.
  virtual size_t retire(bool fill_up);
public:
  // Mutator regions do not do BOT updates; waste and retained region
  // start out empty.
  MutatorAllocRegion()
    : G1AllocRegion("Mutator Alloc Region", false /* bot_updates */),
      _wasted_bytes(0),
      _retained_alloc_region(NULL) { }

  // Returns the combined used memory in the current alloc region and
  // the retained alloc region.
  size_t used_in_alloc_regions();

  // Perform an allocation out of the retained allocation region, with the given
  // minimum and desired size. Returns the actual size allocated (between
  // minimum and desired size) in actual_word_size if the allocation has been
  // successful.
  // Should be called without holding a lock. It will try to allocate lock-free
  // out of the retained region, or return NULL if it was unable to.
  inline HeapWord* attempt_retained_allocation(size_t min_word_size,
                                               size_t desired_word_size,
                                               size_t* actual_word_size);

  // This specialization of release() makes sure that the retained alloc
  // region is retired and set to NULL.
  virtual HeapRegion* release();

  // Should be called before use; also resets base-class state.
  virtual void init();
};
// Common base class for allocation regions used during GC.
class G1GCAllocRegion : public G1AllocRegion {
protected:
  // PLAB statistics to report allocation/waste to when regions are
  // retired (must be non-NULL, see constructor assert).
  G1EvacStats* _stats;
  // The region attribute (e.g. Young, Old) assigned to regions
  // allocated by this object; supplied by the concrete subclass.
  G1HeapRegionAttr::region_type_t _purpose;

  virtual HeapRegion* allocate_new_region(size_t word_size, bool force);
  virtual void retire_region(HeapRegion* alloc_region, size_t allocated_bytes);

  // Overridden to also update the PLAB statistics on retire.
  virtual size_t retire(bool fill_up);

  G1GCAllocRegion(const char* name, bool bot_updates, G1EvacStats* stats, G1HeapRegionAttr::region_type_t purpose)
    : G1AllocRegion(name, bot_updates), _stats(stats), _purpose(purpose) {
    assert(stats != NULL, "Must pass non-NULL PLAB statistics");
  }
};
264 | |
// GC alloc region for survivor objects; regions get the Young
// attribute and do not perform BOT updates.
class SurvivorGCAllocRegion : public G1GCAllocRegion {
public:
  SurvivorGCAllocRegion(G1EvacStats* stats)
    : G1GCAllocRegion("Survivor GC Alloc Region", false /* bot_updates */, stats, G1HeapRegionAttr::Young) { }
};
270 | |
// GC alloc region for old objects; regions get the Old attribute and
// perform BOT updates on allocation.
class OldGCAllocRegion : public G1GCAllocRegion {
public:
  OldGCAllocRegion(G1EvacStats* stats)
    : G1GCAllocRegion("Old GC Alloc Region", true /* bot_updates */, stats, G1HeapRegionAttr::Old) { }

  // This specialization of release() makes sure that the last card that has
  // been allocated into has been completely filled by a dummy object. This
  // avoids races when remembered set scanning wants to update the BOT of the
  // last card in the retained old gc alloc region, and allocation threads
  // allocating into that card at the same time.
  virtual HeapRegion* release();
};
283 | |
284 | #endif // SHARE_GC_G1_G1ALLOCREGION_HPP |
285 | |