// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*++

Module Name:

    gc.h

--*/

#ifndef __GC_H
#define __GC_H

#include "gcinterface.h"
#include "env/gcenv.os.h"

#ifdef BUILD_AS_STANDALONE
#include "gcenv.ee.standalone.inl"

// GCStress does not currently work with Standalone GC
#ifdef STRESS_HEAP
#undef STRESS_HEAP
#endif // STRESS_HEAP
#else
#include "env/gcenv.ee.h"
#endif // BUILD_AS_STANDALONE
#include "gcconfig.h"

/*
 * Promotion Function Prototypes
 */
typedef void enum_func (Object*);

// callback functions for heap walkers
typedef void object_callback_func(void * pvContext, void * pvDataLoc);

struct fgm_history
{
    failure_get_memory fgm;
    size_t size;
    size_t available_pagefile_mb;
    BOOL loh_p;

    void set_fgm (failure_get_memory f, size_t s, BOOL l)
    {
        fgm = f;
        size = s;
        loh_p = l;
    }
};
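
// Illustrative usage sketch (hypothetical call site, not part of this header): a heap
// could record the most recent memory failure before reporting OOM, e.g.
//
//   fgm_history last_failure;
//   last_failure.set_fgm (fgm_reserve_segment, requested_bytes, /* loh_p */ FALSE);
//   last_failure.available_pagefile_mb = pagefile_available_mb;  // set_fgm does not fill this field
//
// fgm_reserve_segment is assumed to be one of the failure_get_memory values declared in
// gcinterface.h; requested_bytes and pagefile_available_mb are placeholder locals.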

// These values should be in sync with the GC_REASONs (in eventtrace.h) used for ETW.
// TODO: it would be easier to make this an OR'ed value.
enum gc_reason
{
    reason_alloc_soh = 0,
    reason_induced = 1,
    reason_lowmemory = 2,
    reason_empty = 3,
    reason_alloc_loh = 4,
    reason_oos_soh = 5,
    reason_oos_loh = 6,
    reason_induced_noforce = 7, // it's an induced GC and doesn't have to be blocking.
    reason_gcstress = 8, // this turns into reason_induced, with gc_mechanisms.stress_induced set to true.
    reason_lowmemory_blocking = 9,
    reason_induced_compacting = 10,
    reason_lowmemory_host = 11,
    reason_pm_full_gc = 12, // provisional mode requested to trigger a full GC
    reason_lowmemory_host_blocking = 13,
    reason_max
};

// Types of GCs, emitted by the GCStart ETW event.
enum gc_etw_type
{
    gc_etw_type_ngc = 0,
    gc_etw_type_bgc = 1,
    gc_etw_type_fgc = 2
};

// Types of segments, emitted by the GCCreateSegment ETW event.
enum gc_etw_segment_type
{
    gc_etw_segment_small_object_heap = 0,
    gc_etw_segment_large_object_heap = 1,
    gc_etw_segment_read_only_heap = 2
};

// Types of allocations, emitted by the GCAllocationTick ETW event.
enum gc_etw_alloc_kind
{
    gc_etw_alloc_soh = 0,
    gc_etw_alloc_loh = 1
};

/* forward declarations */
class CObjectHeader;
class Object;

class IGCHeapInternal;

/* misc defines */
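// Objects whose size meets or exceeds this threshold are allocated on the large object heap.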
#define LARGE_OBJECT_SIZE ((size_t)(85000))
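// Index of the oldest small object heap generation (generations 0, 1 and 2).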
#define max_generation 2

#ifdef GC_CONFIG_DRIVEN
#define MAX_GLOBAL_GC_MECHANISMS_COUNT 6
extern size_t gc_global_mechanisms[MAX_GLOBAL_GC_MECHANISMS_COUNT];
#endif //GC_CONFIG_DRIVEN

#ifdef DACCESS_COMPILE
class DacHeapWalker;
#endif

#ifdef _DEBUG
#define _LOGALLOC
#endif

#define MP_LOCKS

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
extern "C" uint32_t* g_gc_card_bundle_table;
#endif

#if defined(ENABLE_PERF_COUNTERS) || defined(FEATURE_EVENT_TRACE)
// Note: this counter is not updated in a thread-safe way, so the value may not be
// accurate. It is recomputed accurately during full GCs when the handle count is requested.
extern DWORD g_dwHandles;
#endif // ENABLE_PERF_COUNTERS || FEATURE_EVENT_TRACE

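// GC globals shared with the execution engine (the card table and the address bounds
// below are consulted by the write barrier).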
extern "C" uint32_t* g_gc_card_table;
extern "C" uint8_t* g_gc_lowest_address;
extern "C" uint8_t* g_gc_highest_address;
extern "C" GCHeapType g_gc_heap_type;
extern "C" uint32_t g_max_generation;
extern "C" MethodTable* g_gc_pFreeObjectMethodTable;
extern "C" uint32_t g_num_processors;

extern VOLATILE(int32_t) g_fSuspensionPending;

extern uint32_t g_yieldProcessorScalingFactor;

::IGCHandleManager* CreateGCHandleManager();

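// The GC comes in two flavors: workstation (WKS) and, when FEATURE_SVR_GC is defined,
// server (SVR). Each flavor exposes its own CreateGCHeap factory.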
namespace WKS {
    ::IGCHeapInternal* CreateGCHeap();
    class GCHeap;
    class gc_heap;
}

#if defined(FEATURE_SVR_GC)
namespace SVR {
    ::IGCHeapInternal* CreateGCHeap();
    class GCHeap;
    class gc_heap;
}
#endif // defined(FEATURE_SVR_GC)

#ifdef STRESS_HEAP
#define IN_STRESS_HEAP(x) x
#define STRESS_HEAP_ARG(x) ,x
#else // STRESS_HEAP
#define IN_STRESS_HEAP(x)
#define STRESS_HEAP_ARG(x)
#endif // STRESS_HEAP
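
// Usage sketch (hypothetical declarations, for illustration only): these macros let
// stress-heap-only parameters and statements appear in shared code without extra
// #ifdef blocks, e.g.
//
//   void do_alloc (alloc_context* acontext STRESS_HEAP_ARG(int stress_level));
//   do_alloc (acontext STRESS_HEAP_ARG(1));
//   IN_STRESS_HEAP(g_stress_allocations++;)
//
// When STRESS_HEAP is not defined, both macros expand to nothing, so the extra argument
// and the stress-only statement disappear from the declaration and the call site.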

// dynamic data interface
struct gc_counters
{
    size_t current_size;
    size_t promoted_size;
    size_t collection_count;
};

enum bgc_state
{
    bgc_not_in_process = 0,
    bgc_initialized,
    bgc_reset_ww,
    bgc_mark_handles,
    bgc_mark_stack,
    bgc_revisit_soh,
    bgc_revisit_loh,
    bgc_overflow_soh,
    bgc_overflow_loh,
    bgc_final_marking,
    bgc_sweep_soh,
    bgc_sweep_loh,
    bgc_plan_phase
};

enum changed_seg_state
{
    seg_deleted,
    seg_added
};

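// Records a segment range whose state changed (added or deleted), along with the GC
// index and the background GC state at the time of the change.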
void record_changed_seg (uint8_t* start, uint8_t* end,
                         size_t current_gc_index,
                         bgc_state current_bgc_state,
                         changed_seg_state changed_state);

#ifdef GC_CONFIG_DRIVEN
void record_global_mechanism (int mech_index);
#endif //GC_CONFIG_DRIVEN

struct alloc_context : gc_alloc_context
{
#ifdef FEATURE_SVR_GC
    inline SVR::GCHeap* get_alloc_heap()
    {
        return static_cast<SVR::GCHeap*>(gc_reserved_1);
    }

    inline void set_alloc_heap(SVR::GCHeap* heap)
    {
        gc_reserved_1 = heap;
    }

    inline SVR::GCHeap* get_home_heap()
    {
        return static_cast<SVR::GCHeap*>(gc_reserved_2);
    }

    inline void set_home_heap(SVR::GCHeap* heap)
    {
        gc_reserved_2 = heap;
    }
#endif // FEATURE_SVR_GC
};
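
// Illustrative sketch (hypothetical call site): under server GC these accessors stash a
// thread's heap affinity in the gc_reserved_* fields of gc_alloc_context, e.g.
//
//   #ifdef FEATURE_SVR_GC
//   alloc_context* acontext = /* the allocating thread's context */;
//   acontext->set_alloc_heap (chosen_heap);   // heap the next allocations come from
//   acontext->set_home_heap (chosen_heap);    // heap this thread is affinitized to
//   SVR::GCHeap* hp = acontext->get_alloc_heap ();
//   #endif
//
// chosen_heap is a placeholder for whatever heap-selection policy the caller uses.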

class IGCHeapInternal : public IGCHeap {
public:

    virtual ~IGCHeapInternal() {}

private:
    virtual Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags) = 0;
public:
    virtual int GetNumberOfHeaps () = 0;
    virtual int GetHomeHeapNumber () = 0;
    virtual size_t GetPromotedBytes(int heap_index) = 0;

    unsigned GetMaxGeneration()
    {
        return max_generation;
    }

    bool IsValidSegmentSize(size_t cbSize)
    {
        // Must be aligned on a MB boundary and be at least 4 MB.
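        // For example, 4 MB and 32 MB are accepted, 3 MB fails the (cbSize >> 22) test,
        // and 4 MB + 64 KB fails the MB-alignment test.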
        return (((cbSize & (1024*1024-1)) == 0) && (cbSize >> 22));
    }

    bool IsValidGen0MaxSize(size_t cbSize)
    {
        return (cbSize >= 64*1024);
    }

    BOOL IsLargeObject(MethodTable *mt)
    {
        return mt->GetBaseSize() >= LARGE_OBJECT_SIZE;
    }

protected:
public:
#if defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
    // Returns TRUE if the object lives in a frozen segment.
    virtual BOOL IsInFrozenSegment (Object * object) = 0;
#endif // defined(FEATURE_BASICFREEZE) && defined(VERIFY_HEAP)
};

// Go through and touch (read) each page straddled by a memory block.
void TouchPages(void * pStart, size_t cb);

#ifdef WRITE_BARRIER_CHECK
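// Mirrors a reference store into the GC shadow heap so that missed write barriers can
// be detected (write-barrier-checking builds only).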
void updateGCShadow(Object** ptr, Object* val);
#endif

#ifndef DACCESS_COMPILE
// The single GC heap instance, shared with the VM.
extern IGCHeapInternal* g_theGCHeap;

// The single GC handle manager instance, shared with the VM.
extern IGCHandleManager* g_theGCHandleManager;
#endif // DACCESS_COMPILE

#ifndef DACCESS_COMPILE
inline bool IsGCInProgress(bool bConsiderGCStart = false)
{
    return g_theGCHeap != nullptr ? g_theGCHeap->IsGCInProgressHelper(bConsiderGCStart) : false;
}
#endif // DACCESS_COMPILE

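// Returns true when the server flavor of the GC is in use; when server GC support is
// compiled in, asserts that the heap type has already been initialized.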
inline bool IsServerHeap()
{
#ifdef FEATURE_SVR_GC
    assert(g_gc_heap_type != GC_HEAP_INVALID);
    return g_gc_heap_type == GC_HEAP_SVR;
#else // FEATURE_SVR_GC
    return false;
#endif // FEATURE_SVR_GC
}

#endif // __GC_H