/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP
#define SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP

#include "gc/parallel/psPromotionLAB.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"

//
// psPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be on the C heap. This can lead to memory leaks, though, as they
// are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
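//
// A minimal sketch of the expected lifecycle, as driven by a GC worker
// (hedged: worker_id and the root-scanning loop are illustrative, not an
// exact transcript of PSScavenge):
//
//   PSPromotionManager* pm =
//     PSPromotionManager::gc_thread_promotion_manager(worker_id);
//   // During root scanning, push interesting reference locations:
//   pm->claim_or_forward_depth(p);
//   // Process the local queue, copying reachable objects to survivor
//   // space or promoting them to the old generation:
//   pm->drain_stacks(true /* totally_drain */);
//   // Once all workers are done, post_scavenge() checks that the stacks
//   // are empty and flushes the per-manager promotion LABs.
//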

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager {
  friend class PSScavenge;
  friend class PSRefProcTaskExecutor;
 private:
  static PaddedEnd<PSPromotionManager>* _manager_array;
  static OopStarTaskQueueSet*           _stack_array_depth;
  static PreservedMarksSet*             _preserved_marks_set;
  static PSOldGen*                      _old_gen;
  static MutableSpace*                  _young_space;

#if TASKQUEUE_STATS
  size_t                                _masked_pushes;
  size_t                                _masked_steals;
  size_t                                _arrays_chunked;
  size_t                                _array_chunks_processed;

  void print_local_stats(outputStream* const out, uint i) const;
  static void print_taskqueue_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                   _young_lab;
  PSOldPromotionLAB                     _old_lab;
  bool                                  _young_gen_is_full;
  bool                                  _old_gen_is_full;

  OopStarTaskQueue                      _claimed_stack_depth;
  OverflowTaskQueue<oop, mtGC>          _claimed_stack_breadth;

  bool                                  _totally_drain;
  uint                                  _target_stack_size;

  uint                                  _array_chunk_size;
  uint                                  _min_array_size_for_chunking;

  PreservedMarks*                       _preserved_marks;
  PromotionFailedInfo                   _promotion_failed_info;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(uint index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);

  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array, and the length field of that
  // from-space image indicates how many entries of the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK (see below for the
  // choice of mask value). The next three methods do the masking,
  // unmasking, and checking whether the oop is masked or not. Notice
  // that the signatures of the mask and unmask methods look a bit
  // strange, as they accept and return different types (oop and
  // oop*). This is because of the difference in types between what
  // the task queue holds (oop*) and oops to partially-scanned arrays
  // (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.

  // The mask value is 0x2 so that PS_CHUNKED_ARRAY_OOP_MASK (or any
  // future masks) added to task queue entries cannot conflict with
  // COMPRESSED_OOP_MASK, which StarTask uses to tag narrow-oop entries.
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // If something is marked chunked it's always treated as a wide oop*.
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop* (uses StarTask's conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }
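
  // Illustrative round trip (a sketch; the address is made up and the
  // chunk-processing details live in process_array_chunk and the
  // .inline.hpp file):
  //
  //   oop big_array = ...;                             // e.g. 0x...c0 (aligned, low bits clear)
  //   oop* tagged = mask_chunked_array_oop(big_array); //      0x...c2 (bit 1 set)
  //   // Push tagged; a worker that later pops it sees is_oop_masked()
  //   // and scans the next chunk of the array instead of treating the
  //   // entry as a reference location:
  //   oop again = unmask_chunked_array_oop(tagged);    //      0x...c0 == big_array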

  template <class T> void process_array_chunk_work(oop obj,
                                                   int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p);

  inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
                                    uint age, bool tenured,
                                    const PSPromotionLAB* lab);

 protected:
  static OopStarTaskQueueSet* stack_array_depth() { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static bool post_scavenge(YoungGCTracer& gc_tracer);

  static PSPromotionManager* gc_thread_promotion_manager(uint index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, StarTask& t);

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
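  // When promote_immediately is true the copy bypasses the survivor
  // spaces and goes straight to the old generation (a reading of the
  // template parameter; see the definition in the .inline.hpp file).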
  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();
  void register_preserved_marks(PreservedMarks* preserved_marks);
  static void restore_preserved_marks();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
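  // Drain only once the local queue has grown past _target_stack_size,
  // leaving some entries behind for other workers to steal (a sketch of
  // the policy; the threshold is derived from GCDrainStackTargetSize in
  // the constructor).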
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

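  // Whether *p still refers to a young-generation object that this
  // scavenge should copy (a gloss on the name; when check_to_space is
  // set, objects already copied to to-space are additionally filtered
  // out; see the .inline.hpp definition for the exact semantics).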
  static bool should_scavenge(oop* p, bool check_to_space = false);
  static bool should_scavenge(narrowOop* p, bool check_to_space = false);

  template <class T, bool promote_immediately>
  void copy_and_push_safe_barrier(T* p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)

  void push_contents(oop obj);
};

#endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP