// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.


#ifndef GCIMPL_H_
#define GCIMPL_H_

#ifdef SERVER_GC
#define MULTIPLE_HEAPS 1
#endif // SERVER_GC

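// PER_HEAP marks state kept once per heap: under MULTIPLE_HEAPS (server GC) it
// expands to nothing, so each heap object carries its own copy, while under
// workstation GC it expands to 'static' because there is only one heap.
// PER_HEAP_ISOLATED marks process-wide state and is always static.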
#ifdef MULTIPLE_HEAPS

#define PER_HEAP

#else //MULTIPLE_HEAPS

#define PER_HEAP static

#endif // MULTIPLE_HEAPS

#define PER_HEAP_ISOLATED static

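// Write barrier verification helpers: when WRITE_BARRIER_CHECK is defined for a
// single-heap build, these maintain a shadow copy of the heap so missed write
// barriers can be detected; otherwise they compile away to no-ops.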
#if defined(WRITE_BARRIER_CHECK) && !defined (MULTIPLE_HEAPS)
void initGCShadow();
void deleteGCShadow();
void checkGCWriteBarrier();
#else
inline void initGCShadow() {}
inline void deleteGCShadow() {}
inline void checkGCWriteBarrier() {}
#endif

void GCProfileWalkHeap();

class gc_heap;
class CFinalize;

extern bool g_fFinalizerRunOnShutDown;
extern bool g_built_with_svr_gc;
extern uint8_t g_build_variant;
extern VOLATILE(int32_t) g_no_gc_lock;

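// GCHeap is the runtime-facing implementation of IGCHeapInternal; most calls are
// forwarded to gc_heap (one instance per heap under server GC, a single static
// heap otherwise).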
class GCHeap : public IGCHeapInternal
{
protected:

#ifdef MULTIPLE_HEAPS
    gc_heap* pGenGCHeap;
#else
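    // With a single heap all gc_heap state is static, so a null pointer stands in
    // for the (unused) heap instance.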
    #define pGenGCHeap ((gc_heap*)0)
#endif //MULTIPLE_HEAPS

    friend class CFinalize;
    friend class gc_heap;
    friend struct ::alloc_context;
    friend void EnterAllocLock();
    friend void LeaveAllocLock();
    friend void ProfScanRootsHelper(Object** object, ScanContext *pSC, uint32_t dwFlags);
    friend void GCProfileWalkHeap();

public:
    // In order to keep gc.cpp cleaner, ugly EE-specific code is relegated to methods.
    static void UpdatePreGCCounters();
    static void UpdatePostGCCounters();

public:
    GCHeap(){};
    ~GCHeap(){};

    /* BaseGCHeap Methods */
    PER_HEAP_ISOLATED HRESULT Shutdown ();

    size_t GetTotalBytesInUse ();
    // Gets the number of bytes that objects currently occupy on the GC heap.
    size_t GetCurrentObjSize();

    size_t GetLastGCStartTime(int generation);
    size_t GetLastGCDuration(int generation);
    size_t GetNow();

    void DiagTraceGCSegments ();
    void PublishObject(uint8_t* obj);

    bool IsGCInProgressHelper (bool bConsiderGCStart = false);

    uint32_t WaitUntilGCComplete (bool bConsiderGCStart = false);

    void SetGCInProgress(bool fInProgress);

    bool RuntimeStructuresValid();

    void SetSuspensionPending(bool fSuspensionPending);

    void SetYieldProcessorScalingFactor(float yieldProcessorScalingFactor);

    void SetWaitForGCEvent();
    void ResetWaitForGCEvent();

    HRESULT Initialize ();

    // flags can be GC_ALLOC_CONTAINS_REF or GC_ALLOC_FINALIZE
    Object* AllocAlign8 (gc_alloc_context* acontext, size_t size, uint32_t flags);
private:
    Object* AllocAlign8Common (void* hp, alloc_context* acontext, size_t size, uint32_t flags);
public:
    Object* AllocLHeap (size_t size, uint32_t flags);
    Object* Alloc (gc_alloc_context* acontext, size_t size, uint32_t flags);

    void FixAllocContext (gc_alloc_context* acontext, void* arg, void *heap);

    Object* GetContainingObject(void *pInteriorPtr, bool fCollectedGenOnly);

#ifdef MULTIPLE_HEAPS
    static void AssignHeap (alloc_context* acontext);
    static GCHeap* GetHeap (int);
#endif //MULTIPLE_HEAPS

    int GetHomeHeapNumber ();
    bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number);
    int GetNumberOfHeaps ();
    void HideAllocContext(alloc_context*);
    void RevealAllocContext(alloc_context*);

    bool IsObjectInFixedHeap(Object *pObj);

    HRESULT GarbageCollect (int generation = -1, bool low_memory_p=false, int mode=collection_blocking);

    ////
    // GC callback functions
    // Check if an argument is promoted (ONLY CALL DURING
    // THE PROMOTIONSGRANTED CALLBACK.)
    bool IsPromoted (Object *object);

    size_t GetPromotedBytes (int heap_index);

    int CollectionCount (int generation, int get_bgc_fgc_count = 0);

    // promote an object
    PER_HEAP_ISOLATED void Promote (Object** object,
                                    ScanContext* sc,
                                    uint32_t flags=0);

    // Find the relocation address for an object
    PER_HEAP_ISOLATED void Relocate (Object** object,
                                     ScanContext* sc,
                                     uint32_t flags=0);


    HRESULT Init (size_t heapSize);

    // Register an object for finalization
    bool RegisterForFinalization (int gen, Object* obj);

    // Suppress finalization for an object (marks its finalizer as having already run)
    void SetFinalizationRun (Object* obj);

    // returns the generation number of an object (not valid during relocation)
    unsigned WhichGeneration (Object* object);
    // returns TRUE if the object is ephemeral
    bool IsEphemeral (Object* object);
    bool IsHeapPointer (void* object, bool small_heap_only = false);

    void ValidateObjectMember (Object *obj);

    PER_HEAP size_t ApproxTotalBytesInUse(BOOL small_heap_only = FALSE);
    PER_HEAP size_t ApproxFreeBytes();

    unsigned GetCondemnedGeneration();

    void GetMemoryInfo(uint32_t* highMemLoadThreshold,
                       uint64_t* totalPhysicalMem,
                       uint32_t* lastRecordedMemLoad,
                       size_t* lastRecordedHeapSize,
                       size_t* lastRecordedFragmentation);

    int GetGcLatencyMode();
    int SetGcLatencyMode(int newLatencyMode);

    int GetLOHCompactionMode();
    void SetLOHCompactionMode(int newLOHCompactionMode);

    bool RegisterForFullGCNotification(uint32_t gen2Percentage,
                                       uint32_t lohPercentage);
    bool CancelFullGCNotification();
    int WaitForFullGCApproach(int millisecondsTimeout);
    int WaitForFullGCComplete(int millisecondsTimeout);

    int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC);
    int EndNoGCRegion();

    unsigned GetGcCount();

    Object* GetNextFinalizable() { return GetNextFinalizableObject(); }
    size_t GetNumberOfFinalizable() { return GetNumberFinalizableObjects(); }

    PER_HEAP_ISOLATED HRESULT GetGcCounters(int gen, gc_counters* counters);

    size_t GetValidSegmentSize(bool large_seg = false);

    static size_t GetValidGen0MaxSize(size_t seg_size);

    void SetReservedVMLimit (size_t vmlimit);

    PER_HEAP_ISOLATED Object* GetNextFinalizableObject();
    PER_HEAP_ISOLATED size_t GetNumberFinalizableObjects();
    PER_HEAP_ISOLATED size_t GetFinalizablePromotedCount();

    void SetFinalizeQueueForShutdown(bool fHasLock);
    bool FinalizeAppDomain(void *pDomain, bool fRunFinalizers);
    bool ShouldRestartFinalizerWatchDog();

    void DiagWalkObject (Object* obj, walk_fn fn, void* context);
    void SetFinalizeRunOnShutdown(bool value);

public: // FIX

    // Lock for finalization
    PER_HEAP_ISOLATED
        VOLATILE(int32_t) m_GCFLock;

    PER_HEAP_ISOLATED BOOL GcCollectClasses;
    PER_HEAP_ISOLATED
        VOLATILE(BOOL) GcInProgress; // used for syncing w/GC
    PER_HEAP_ISOLATED VOLATILE(unsigned) GcCount;
    PER_HEAP_ISOLATED unsigned GcCondemnedGeneration;
    // calculated at the end of a GC.
    PER_HEAP_ISOLATED size_t totalSurvivedSize;

    // Use only for GC tracing.
    PER_HEAP unsigned int GcDuration;

    size_t GarbageCollectGeneration (unsigned int gen=0, gc_reason reason=reason_empty);
    // Interface with gc_heap
    size_t GarbageCollectTry (int generation, BOOL low_memory_p=FALSE, int mode=collection_blocking);

    // frozen segment management functions
    virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo);
    virtual void UnregisterFrozenSegment(segment_handle seg);

    // Event control functions
    void ControlEvents(GCEventKeyword keyword, GCEventLevel level);
    void ControlPrivateEvents(GCEventKeyword keyword, GCEventLevel level);

    void WaitUntilConcurrentGCComplete (); // Use in managed threads
#ifndef DACCESS_COMPILE
    HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout); // Use in native threads. Returns TRUE if it succeeded, FALSE if it failed or timed out.
#endif
    bool IsConcurrentGCInProgress();

    // Enable/disable concurrent GC
    void TemporaryEnableConcurrentGC();
    void TemporaryDisableConcurrentGC();
    bool IsConcurrentGCEnabled();

    PER_HEAP_ISOLATED GCEvent *WaitForGCEvent; // used for syncing w/GC

    PER_HEAP_ISOLATED CFinalize* m_Finalize;

    PER_HEAP_ISOLATED gc_heap* Getgc_heap();

private:
    static bool SafeToRestartManagedThreads()
    {
        // Note: this routine should return true when the last barrier
        // to threads returning to cooperative mode is down after gc.
        // In other words, if the sequence in GCHeap::RestartEE changes,
        // the condition here may have to change as well.
        return g_fSuspensionPending == 0;
    }
public:
    // Returns TRUE if a GC actually happened, otherwise FALSE
    bool StressHeap(gc_alloc_context * acontext);

#ifndef FEATURE_REDHAWK // Redhawk forces relocation a different way
#ifdef STRESS_HEAP
protected:

    // only used in BACKGROUND_GC, but the symbol is not defined yet...
    PER_HEAP_ISOLATED int gc_stress_fgcs_in_bgc;

#if !defined(MULTIPLE_HEAPS)
    // handles to hold the string objects that will force GC movement
    enum { NUM_HEAP_STRESS_OBJS = 8 };
    PER_HEAP OBJECTHANDLE m_StressObjs[NUM_HEAP_STRESS_OBJS];
    PER_HEAP int m_CurStressObj;
#endif // !defined(MULTIPLE_HEAPS)
#endif // STRESS_HEAP
#endif // FEATURE_REDHAWK

    virtual void DiagDescrGenerations (gen_walk_fn fn, void *context);

    virtual void DiagWalkSurvivorsWithType (void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type);

    virtual void DiagWalkFinalizeQueue (void* gc_context, fq_walk_fn fn);

    virtual void DiagScanFinalizeQueue (fq_scan_fn fn, ScanContext* context);

    virtual void DiagScanHandles (handle_scan_fn fn, int gen_number, ScanContext* context);

    virtual void DiagScanDependentHandles (handle_scan_fn fn, int gen_number, ScanContext* context);

    virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p);

public:
    Object * NextObj (Object * object);
#if defined (FEATURE_BASICFREEZE) && defined (VERIFY_HEAP)
    BOOL IsInFrozenSegment (Object * object);
#endif // defined (FEATURE_BASICFREEZE) && defined (VERIFY_HEAP)
};

#endif // GCIMPL_H_
