1 | /* |
2 | * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved. |
3 | * |
4 | * This code is free software; you can redistribute it and/or modify it |
5 | * under the terms of the GNU General Public License version 2 only, as |
6 | * published by the Free Software Foundation. |
7 | * |
8 | * This code is distributed in the hope that it will be useful, but WITHOUT |
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
11 | * version 2 for more details (a copy is included in the LICENSE file that |
12 | * accompanied this code). |
13 | * |
14 | * You should have received a copy of the GNU General Public License version |
15 | * 2 along with this work; if not, write to the Free Software Foundation, |
16 | * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
17 | * |
18 | * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
19 | * or visit www.oracle.com if you need additional information or have any |
20 | * questions. |
21 | * |
22 | */ |
23 | |
24 | #ifndef SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP |
25 | #define SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP |
26 | |
27 | #include "gc/shared/space.hpp" |
28 | #include "gc/shenandoah/shenandoahAllocRequest.hpp" |
29 | #include "gc/shenandoah/shenandoahAsserts.hpp" |
30 | #include "gc/shenandoah/shenandoahHeap.hpp" |
31 | #include "gc/shenandoah/shenandoahPacer.hpp" |
32 | #include "utilities/sizes.hpp" |
33 | |
34 | class VMStructs; |
35 | class ShenandoahHeapRegionStateConstant; |
36 | |
37 | class ShenandoahHeapRegion : public ContiguousSpace { |
38 | friend class VMStructs; |
39 | friend class ShenandoahHeapRegionStateConstant; |
40 | private: |
41 | /* |
    Region state is described by a state machine. Transitions are guarded by the
    heap lock, which allows changing the state of several regions atomically.
    Region states can be logically aggregated into groups.
45 | |
46 | "Empty": |
47 | ................................................................. |
48 | . . |
49 | . . |
50 | . Uncommitted <------- Committed <------------------------\ |
51 | . | | . | |
52 | . \---------v-----------/ . | |
53 | . | . | |
54 | .........................|....................................... | |
55 | | | |
56 | "Active": | | |
57 | .........................|....................................... | |
58 | . | . | |
59 | . /-----------------^-------------------\ . | |
60 | . | | . | |
61 | . v v "Humongous": . | |
62 | . Regular ---\-----\ ..................O................ . | |
63 | . | ^ | | . | . . | |
64 | . | | | | . *---------\ . . | |
65 | . v | | | . v v . . | |
66 | . Pinned Cset | . HStart <--> H/Start H/Cont . . | |
67 | . ^ / | | . Pinned v | . . | |
68 | . | / | | . *<--------/ . . | |
69 | . | v | | . | . . | |
70 | . CsetPinned | | ..................O................ . | |
71 | . | | | . | |
72 | . \-----\---v-------------------/ . | |
73 | . | . | |
74 | .........................|....................................... | |
75 | | | |
76 | "Trash": | | |
77 | .........................|....................................... | |
78 | . | . | |
79 | . v . | |
80 | . Trash ---------------------------------------/ |
81 | . . |
82 | . . |
83 | ................................................................. |
84 | |
    Transition from "Empty" to "Active" is the first allocation. It can go from {Uncommitted, Committed}
    to {Regular, "Humongous"}. Subsequent allocations can also be satisfied from already-active Regular
    regions, but never from Humongous ones.
87 | |
    Transition from "Active" to "Trash" is reclamation. It can go from CSet during the normal cycle,
    and from {Regular, "Humongous"} for immediate reclamation. Having a separate Trash state allows
    quick reclamation without actually cleaning the regions up.
91 | |
    Transition from "Trash" to "Empty" is recycling. It cleans up the regions and the corresponding
    metadata, and can be done asynchronously and in bulk.
94 | |
    Note how the allowed transitions prevent logic bugs:
      a) No region can go Empty, unless properly reclaimed/recycled;
      b) No region can go Uncommitted, unless reclaimed/recycled first;
      c) Only Regular regions can go to CSet;
      d) Pinned cannot go Trash, thus it can never be reclaimed until unpinned;
      e) Pinned cannot go CSet, thus it never moves;
      f) Humongous cannot be used for regular allocations;
      g) Humongous cannot go CSet, thus it never moves;
      h) Humongous start can go Pinned, and thus can be protected from moves (humongous continuations
         follow their associated humongous starts and are not pinnable/movable by themselves);
105 | i) Empty cannot go Trash, avoiding useless work; |
106 | j) ... |
107 | */ |
108 | |
109 | enum RegionState { |
110 | _empty_uncommitted, // region is empty and has memory uncommitted |
111 | _empty_committed, // region is empty and has memory committed |
112 | _regular, // region is for regular allocations |
113 | _humongous_start, // region is the humongous start |
114 | _humongous_cont, // region is the humongous continuation |
115 | _pinned_humongous_start, // region is both humongous start and pinned |
116 | _cset, // region is in collection set |
117 | _pinned, // region is pinned |
118 | _pinned_cset, // region is pinned and in cset (evac failure path) |
119 | _trash, // region contains only trash |
120 | _REGION_STATES_NUM // last |
121 | }; |
122 | |
123 | static const char* region_state_to_string(RegionState s) { |
124 | switch (s) { |
      case _empty_uncommitted:       return "Empty Uncommitted";
      case _empty_committed:         return "Empty Committed";
      case _regular:                 return "Regular";
      case _humongous_start:         return "Humongous Start";
      case _humongous_cont:          return "Humongous Continuation";
      case _pinned_humongous_start:  return "Humongous Start, Pinned";
      case _cset:                    return "Collection Set";
      case _pinned:                  return "Pinned";
      case _pinned_cset:             return "Collection Set, Pinned";
      case _trash:                   return "Trash";
      default:
        ShouldNotReachHere();
        return "";
138 | } |
139 | } |
140 | |
  // This method protects against accidental changes in enum order; external consumers
  // (e.g. JFR state constants via ShenandoahHeapRegionStateConstant) rely on stable ordinals:
142 | int region_state_to_ordinal(RegionState s) const { |
143 | switch (s) { |
144 | case _empty_uncommitted: return 0; |
145 | case _empty_committed: return 1; |
146 | case _regular: return 2; |
147 | case _humongous_start: return 3; |
148 | case _humongous_cont: return 4; |
149 | case _cset: return 5; |
150 | case _pinned: return 6; |
151 | case _trash: return 7; |
152 | case _pinned_cset: return 8; |
153 | case _pinned_humongous_start: return 9; |
154 | default: |
155 | ShouldNotReachHere(); |
156 | return -1; |
157 | } |
158 | } |
159 | |
160 | void report_illegal_transition(const char* method); |
161 | |
162 | public: |
163 | static const int region_states_num() { |
164 | return _REGION_STATES_NUM; |
165 | } |
166 | |
167 | // Allowed transitions from the outside code: |
168 | void make_regular_allocation(); |
169 | void make_regular_bypass(); |
170 | void make_humongous_start(); |
171 | void make_humongous_cont(); |
172 | void make_humongous_start_bypass(); |
173 | void make_humongous_cont_bypass(); |
174 | void make_pinned(); |
175 | void make_unpinned(); |
176 | void make_cset(); |
177 | void make_trash(); |
178 | void make_trash_immediate(); |
179 | void make_empty(); |
180 | void make_uncommitted(); |
181 | void make_committed_bypass(); |
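
  // Hedged usage sketch: the call sequence below is illustrative, not a verbatim quote of the
  // allocation path. A caller first takes the heap lock, drives the region into an allocatable
  // state, and only then allocates from it:
  //
  //   ShenandoahHeapLocker locker(heap->lock());
  //   if (region->is_empty()) {
  //     region->make_regular_allocation();
  //   }
  //   HeapWord* obj = region->allocate(req.size(), req.type());  // req: ShenandoahAllocRequest; NULL when full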
182 | |
183 | // Individual states: |
184 | bool is_empty_uncommitted() const { return _state == _empty_uncommitted; } |
185 | bool is_empty_committed() const { return _state == _empty_committed; } |
186 | bool is_regular() const { return _state == _regular; } |
187 | bool is_humongous_continuation() const { return _state == _humongous_cont; } |
188 | |
189 | // Participation in logical groups: |
190 | bool is_empty() const { return is_empty_committed() || is_empty_uncommitted(); } |
191 | bool is_active() const { return !is_empty() && !is_trash(); } |
192 | bool is_trash() const { return _state == _trash; } |
193 | bool is_humongous_start() const { return _state == _humongous_start || _state == _pinned_humongous_start; } |
194 | bool is_humongous() const { return is_humongous_start() || is_humongous_continuation(); } |
195 | bool is_committed() const { return !is_empty_uncommitted(); } |
196 | bool is_cset() const { return _state == _cset || _state == _pinned_cset; } |
197 | bool is_pinned() const { return _state == _pinned || _state == _pinned_cset || _state == _pinned_humongous_start; } |
198 | |
199 | // Macro-properties: |
200 | bool is_alloc_allowed() const { return is_empty() || is_regular() || _state == _pinned; } |
201 | bool is_move_allowed() const { return is_regular() || _state == _cset || (ShenandoahHumongousMoves && _state == _humongous_start); } |
202 | |
203 | RegionState state() const { return _state; } |
204 | int state_ordinal() const { return region_state_to_ordinal(_state); } |
205 | |
206 | private: |
207 | static size_t RegionCount; |
208 | static size_t RegionSizeBytes; |
209 | static size_t RegionSizeWords; |
210 | static size_t RegionSizeBytesShift; |
211 | static size_t RegionSizeWordsShift; |
212 | static size_t RegionSizeBytesMask; |
213 | static size_t RegionSizeWordsMask; |
214 | static size_t HumongousThresholdBytes; |
215 | static size_t HumongousThresholdWords; |
216 | static size_t MaxTLABSizeBytes; |
217 | static size_t MaxTLABSizeWords; |
218 | |
  // Global allocation counter, incremented for each allocation under the Shenandoah heap lock.
  // Padded to avoid false sharing with the read-only fields above.
221 | struct PaddedAllocSeqNum { |
222 | DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(uint64_t)); |
223 | uint64_t value; |
224 | DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0); |
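    // Pad 0 reserves (cache line - sizeof(uint64_t)) bytes before the counter and pad 1 a full
    // cache line after it, so the hot counter never shares a cache line with neighboring data.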
225 | |
226 | PaddedAllocSeqNum() { |
227 | // start with 1, reserve 0 for uninitialized value |
228 | value = 1; |
229 | } |
230 | }; |
231 | |
232 | static PaddedAllocSeqNum _alloc_seq_num; |
233 | |
234 | // Never updated fields |
235 | ShenandoahHeap* _heap; |
236 | MemRegion _reserved; |
237 | size_t _region_number; |
238 | |
239 | // Rarely updated fields |
240 | HeapWord* _new_top; |
241 | size_t _critical_pins; |
242 | double _empty_time; |
243 | |
244 | // Seldom updated fields |
245 | RegionState _state; |
246 | |
247 | // Frequently updated fields |
248 | size_t _tlab_allocs; |
249 | size_t _gclab_allocs; |
250 | size_t _shared_allocs; |
251 | |
252 | uint64_t _seqnum_first_alloc_mutator; |
253 | uint64_t _seqnum_first_alloc_gc; |
254 | uint64_t _seqnum_last_alloc_mutator; |
255 | uint64_t _seqnum_last_alloc_gc; |
256 | |
257 | volatile size_t _live_data; |
258 | |
259 | // Claim some space at the end to protect next region |
260 | DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, 0); |
261 | |
262 | public: |
263 | ShenandoahHeapRegion(ShenandoahHeap* heap, HeapWord* start, size_t size_words, size_t index, bool committed); |
264 | |
265 | static const size_t MIN_NUM_REGIONS = 10; |
266 | |
267 | static void setup_sizes(size_t max_heap_size); |
268 | |
269 | double empty_time() { |
270 | return _empty_time; |
271 | } |
272 | |
273 | inline static size_t required_regions(size_t bytes) { |
274 | return (bytes + ShenandoahHeapRegion::region_size_bytes() - 1) >> ShenandoahHeapRegion::region_size_bytes_shift(); |
275 | } |
276 | |
277 | inline static size_t region_count() { |
278 | return ShenandoahHeapRegion::RegionCount; |
279 | } |
280 | |
281 | inline static size_t region_size_bytes() { |
282 | return ShenandoahHeapRegion::RegionSizeBytes; |
283 | } |
284 | |
285 | inline static size_t region_size_words() { |
286 | return ShenandoahHeapRegion::RegionSizeWords; |
287 | } |
288 | |
289 | inline static size_t region_size_bytes_shift() { |
290 | return ShenandoahHeapRegion::RegionSizeBytesShift; |
291 | } |
292 | |
293 | inline static size_t region_size_words_shift() { |
294 | return ShenandoahHeapRegion::RegionSizeWordsShift; |
295 | } |
296 | |
297 | inline static size_t region_size_bytes_mask() { |
298 | return ShenandoahHeapRegion::RegionSizeBytesMask; |
299 | } |
300 | |
301 | inline static size_t region_size_words_mask() { |
302 | return ShenandoahHeapRegion::RegionSizeWordsMask; |
303 | } |
304 | |
305 | // Convert to jint with sanity checking |
306 | inline static jint region_size_bytes_jint() { |
    assert (ShenandoahHeapRegion::RegionSizeBytes <= (size_t)max_jint, "sanity");
308 | return (jint)ShenandoahHeapRegion::RegionSizeBytes; |
309 | } |
310 | |
311 | // Convert to jint with sanity checking |
312 | inline static jint region_size_words_jint() { |
    assert (ShenandoahHeapRegion::RegionSizeWords <= (size_t)max_jint, "sanity");
314 | return (jint)ShenandoahHeapRegion::RegionSizeWords; |
315 | } |
316 | |
317 | // Convert to jint with sanity checking |
318 | inline static jint region_size_bytes_shift_jint() { |
    assert (ShenandoahHeapRegion::RegionSizeBytesShift <= (size_t)max_jint, "sanity");
320 | return (jint)ShenandoahHeapRegion::RegionSizeBytesShift; |
321 | } |
322 | |
323 | // Convert to jint with sanity checking |
324 | inline static jint region_size_words_shift_jint() { |
    assert (ShenandoahHeapRegion::RegionSizeWordsShift <= (size_t)max_jint, "sanity");
326 | return (jint)ShenandoahHeapRegion::RegionSizeWordsShift; |
327 | } |
328 | |
329 | inline static size_t humongous_threshold_bytes() { |
330 | return ShenandoahHeapRegion::HumongousThresholdBytes; |
331 | } |
332 | |
333 | inline static size_t humongous_threshold_words() { |
334 | return ShenandoahHeapRegion::HumongousThresholdWords; |
335 | } |
336 | |
337 | inline static size_t max_tlab_size_bytes() { |
338 | return ShenandoahHeapRegion::MaxTLABSizeBytes; |
339 | } |
340 | |
341 | inline static size_t max_tlab_size_words() { |
342 | return ShenandoahHeapRegion::MaxTLABSizeWords; |
343 | } |
344 | |
345 | static uint64_t seqnum_current_alloc() { |
    // _alloc_seq_num.value is the next number to be handed out, so the last used one is value - 1
347 | return _alloc_seq_num.value - 1; |
348 | } |
349 | |
350 | size_t region_number() const; |
351 | |
352 | // Allocation (return NULL if full) |
353 | inline HeapWord* allocate(size_t word_size, ShenandoahAllocRequest::Type type); |
354 | |
355 | HeapWord* allocate(size_t word_size) shenandoah_not_implemented_return(NULL) |
356 | |
357 | void clear_live_data(); |
358 | void set_live_data(size_t s); |
359 | |
360 | // Increase live data for newly allocated region |
361 | inline void increase_live_data_alloc_words(size_t s); |
362 | |
363 | // Increase live data for region scanned with GC |
364 | inline void increase_live_data_gc_words(size_t s); |
365 | |
366 | bool has_live() const; |
367 | size_t get_live_data_bytes() const; |
368 | size_t get_live_data_words() const; |
369 | |
370 | void print_on(outputStream* st) const; |
371 | |
372 | size_t garbage() const; |
373 | |
374 | void recycle(); |
375 | |
376 | void oop_iterate(OopIterateClosure* cl); |
377 | |
378 | HeapWord* block_start_const(const void* p) const; |
379 | |
380 | bool in_collection_set() const; |
381 | |
382 | // Find humongous start region that this region belongs to |
383 | ShenandoahHeapRegion* humongous_start_region() const; |
384 | |
385 | CompactibleSpace* next_compaction_space() const shenandoah_not_implemented_return(NULL); |
386 | void prepare_for_compaction(CompactPoint* cp) shenandoah_not_implemented; |
387 | void adjust_pointers() shenandoah_not_implemented; |
388 | void compact() shenandoah_not_implemented; |
389 | |
390 | void set_new_top(HeapWord* new_top) { _new_top = new_top; } |
391 | HeapWord* new_top() const { return _new_top; } |
392 | |
393 | inline void adjust_alloc_metadata(ShenandoahAllocRequest::Type type, size_t); |
394 | void reset_alloc_metadata_to_shared(); |
395 | void reset_alloc_metadata(); |
396 | size_t get_shared_allocs() const; |
397 | size_t get_tlab_allocs() const; |
398 | size_t get_gclab_allocs() const; |
399 | |
  // Seq number 0 means "no allocation recorded" (see PaddedAllocSeqNum), so prefer the counter
  // that has actually been set; otherwise take the earlier/later of the two.
  uint64_t seqnum_first_alloc() const {
    if (_seqnum_first_alloc_mutator == 0) return _seqnum_first_alloc_gc;
    if (_seqnum_first_alloc_gc == 0) return _seqnum_first_alloc_mutator;
    return MIN2(_seqnum_first_alloc_mutator, _seqnum_first_alloc_gc);
  }

  uint64_t seqnum_last_alloc() const {
    return MAX2(_seqnum_last_alloc_mutator, _seqnum_last_alloc_gc);
  }
409 | |
410 | uint64_t seqnum_first_alloc_mutator() const { |
411 | return _seqnum_first_alloc_mutator; |
412 | } |
413 | |
414 | uint64_t seqnum_last_alloc_mutator() const { |
415 | return _seqnum_last_alloc_mutator; |
416 | } |
417 | |
418 | uint64_t seqnum_first_alloc_gc() const { |
419 | return _seqnum_first_alloc_gc; |
420 | } |
421 | |
422 | uint64_t seqnum_last_alloc_gc() const { |
423 | return _seqnum_last_alloc_gc; |
424 | } |
425 | |
426 | private: |
427 | void do_commit(); |
428 | void do_uncommit(); |
429 | |
430 | void oop_iterate_objects(OopIterateClosure* cl); |
431 | void oop_iterate_humongous(OopIterateClosure* cl); |
432 | |
433 | inline void internal_increase_live_data(size_t s); |
434 | |
435 | void set_state(RegionState to); |
436 | }; |
437 | |
438 | #endif // SHARE_GC_SHENANDOAH_SHENANDOAHHEAPREGION_HPP |
439 | |