// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#ifndef _GC_INTERFACE_H_
#define _GC_INTERFACE_H_

// The major version of the GC/EE interface. Breaking changes to this interface
// require bumps in the major version number.
#define GC_INTERFACE_MAJOR_VERSION 2

// The minor version of the GC/EE interface. Non-breaking changes require a bump
// in the minor version number. GCs and EEs with mismatched minor version numbers
// can still interoperate correctly, with some care.
#define GC_INTERFACE_MINOR_VERSION 1

struct ScanContext;
struct gc_alloc_context;
class CrawlFrame;

// Callback passed to GcScanRoots.
typedef void promote_func(PTR_PTR_Object, ScanContext*, uint32_t);

// Callback passed to GcEnumAllocContexts.
typedef void enum_alloc_context_func(gc_alloc_context*, void*);

// Callback passed to CreateBackgroundThread.
typedef uint32_t (__stdcall *GCBackgroundThreadFunction)(void* param);

// Struct often used as a parameter to callbacks.
typedef struct
{
    promote_func* f;
    ScanContext* sc;
    CrawlFrame * cf;
} GCCONTEXT;

// SUSPEND_REASON is the reason why the GC wishes to suspend the EE,
// used as an argument to IGCToCLR::SuspendEE.
typedef enum
{
    SUSPEND_FOR_GC = 1,
    SUSPEND_FOR_GC_PREP = 6
} SUSPEND_REASON;

typedef enum
{
    walk_for_gc = 1,
    walk_for_bgc = 2,
    walk_for_loh = 3
} walk_surv_type;

// Different operations that can be done by GCToEEInterface::StompWriteBarrier
enum class WriteBarrierOp
{
    StompResize,
    StompEphemeral,
    Initialize,
    SwitchToWriteWatch,
    SwitchToNonWriteWatch
};

// Arguments to GCToEEInterface::StompWriteBarrier
struct WriteBarrierParameters
{
    // The operation that StompWriteBarrier will perform.
    WriteBarrierOp operation;

    // Whether or not the runtime is currently suspended. If it is not,
    // the EE will need to suspend it before bashing the write barrier.
    // Used for all operations.
    bool is_runtime_suspended;

    // Whether or not the GC has moved the ephemeral generation to no longer
    // be at the top of the heap. When the ephemeral generation is at the top
    // of the heap, and the write barrier observes that a pointer is greater than
    // g_ephemeral_low, it does not need to check that the pointer is less than
    // g_ephemeral_high because there is nothing in the GC heap above the ephemeral
    // generation. When this is not the case, however, the GC must inform the EE
    // so that the EE can switch to a write barrier that checks that a pointer
    // is both greater than g_ephemeral_low and less than g_ephemeral_high.
    // Used for WriteBarrierOp::StompResize.
    bool requires_upper_bounds_check;

    // The new card table location. May or may not be the same as the previous
    // card table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
    uint32_t* card_table;

    // The new card bundle table location. May or may not be the same as the previous
    // card bundle table. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
    uint32_t* card_bundle_table;

    // The heap's new low boundary. May or may not be the same as the previous
    // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
    uint8_t* lowest_address;

    // The heap's new high boundary. May or may not be the same as the previous
    // value. Used for WriteBarrierOp::Initialize and WriteBarrierOp::StompResize.
    uint8_t* highest_address;

    // The new start of the ephemeral generation.
    // Used for WriteBarrierOp::StompEphemeral.
    uint8_t* ephemeral_low;

    // The new end of the ephemeral generation.
    // Used for WriteBarrierOp::StompEphemeral.
    uint8_t* ephemeral_high;

    // The new write watch table, if we are using our own write watch
    // implementation. Used for WriteBarrierOp::SwitchToWriteWatch only.
    uint8_t* write_watch_table;
};
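
// Illustrative sketch (not part of this interface): a GC that has just moved the
// ephemeral generation might notify the EE as follows. The exact StompWriteBarrier
// signature is an assumption here; the helper name is hypothetical.
//
//   void NotifyEphemeralBoundsChanged(uint8_t* new_low, uint8_t* new_high)
//   {
//       WriteBarrierParameters args = {};
//       args.operation = WriteBarrierOp::StompEphemeral;
//       args.is_runtime_suspended = true;   // caller has already suspended the EE
//       args.ephemeral_low = new_low;
//       args.ephemeral_high = new_high;
//       GCToEEInterface::StompWriteBarrier(&args);
//   }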

// Opaque type for tracking object pointers
#ifndef DACCESS_COMPILE
struct OBJECTHANDLE__
{
    void* unused;
};
typedef struct OBJECTHANDLE__* OBJECTHANDLE;
#else
typedef uintptr_t OBJECTHANDLE;
#endif

/*
 * Scanning callback.
 */
typedef void (CALLBACK *HANDLESCANPROC)(PTR_UNCHECKED_OBJECTREF pref, uintptr_t *pExtraInfo, uintptr_t param1, uintptr_t param2);

#include "gcinterface.ee.h"

// The allocation context must be known to the VM for use in the allocation
// fast path and known to the GC for performing the allocation. Every Thread
// has its own allocation context that it hands to the GC when allocating.
struct gc_alloc_context
{
    uint8_t* alloc_ptr;
    uint8_t* alloc_limit;
    int64_t alloc_bytes; // Number of bytes allocated on SOH by this context
    int64_t alloc_bytes_loh; // Number of bytes allocated on LOH by this context
    // These two fields are deliberately not exposed past the EE-GC interface.
    void* gc_reserved_1;
    void* gc_reserved_2;
    int alloc_count;
public:

    void init()
    {
        LIMITED_METHOD_CONTRACT;

        alloc_ptr = 0;
        alloc_limit = 0;
        alloc_bytes = 0;
        alloc_bytes_loh = 0;
        gc_reserved_1 = 0;
        gc_reserved_2 = 0;
        alloc_count = 0;
    }
};
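
// Illustrative sketch (not part of this interface): an EE-side allocation fast
// path bump-allocates out of the context and falls back to the GC when the
// context is exhausted. The helper name and the flags value are hypothetical;
// IGCHeap::Alloc is declared later in this header.
//
//   Object* AllocateSmallObject(IGCHeap* heap, gc_alloc_context* ctx, size_t size)
//   {
//       uint8_t* result = ctx->alloc_ptr;
//       if (size <= (size_t)(ctx->alloc_limit - result))
//       {
//           ctx->alloc_ptr = result + size;
//           return reinterpret_cast<Object*>(result);
//       }
//       // Slow path: the GC refills the context and may trigger a collection.
//       return heap->Alloc(ctx, size, 0);
//   }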

#include "gcinterface.dac.h"

// stub type to abstract a heap segment
struct gc_heap_segment_stub;
typedef gc_heap_segment_stub *segment_handle;

struct segment_info
{
    void * pvMem; // base of the allocation, not the first object (must add ibFirstObject)
    size_t ibFirstObject; // offset to the base of the first object in the segment
    size_t ibAllocated; // limit of allocated memory in the segment (>= firstobject)
    size_t ibCommit; // limit of committed memory in the segment (>= allocated)
    size_t ibReserved; // limit of reserved memory in the segment (>= commit)
};
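
// Illustrative sketch (not part of this interface): how the fields relate when
// describing a read-only segment to RegisterFrozenSegment (declared later in this
// header). The buffer layout, the ObjHeader type, and the helper name are
// assumptions used for illustration only.
//
//   segment_info DescribeFrozenSegment(void* buffer, size_t committedSize, size_t reservedSize)
//   {
//       segment_info si;
//       si.pvMem = buffer;
//       si.ibFirstObject = sizeof(ObjHeader); // the first object starts after its header
//       si.ibAllocated = committedSize;       // objects occupy the committed range
//       si.ibCommit = committedSize;
//       si.ibReserved = reservedSize;
//       return si;
//   }
//
//   // The first object in the segment lives at (uint8_t*)si.pvMem + si.ibFirstObject.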

#ifdef PROFILING_SUPPORTED
#define GC_PROFILING //Turn on profiling
#endif // PROFILING_SUPPORTED

#define LARGE_OBJECT_SIZE ((size_t)(85000))

// The minimum size of an object is three pointers wide: one for the syncblock,
// one for the object header, and one for the first field in the object.
#define min_obj_size ((sizeof(uint8_t*) + sizeof(uintptr_t) + sizeof(size_t)))

// The bit shift used to convert a memory address into an index into the
// Software Write Watch table.
#define SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift 0xc
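
// Illustrative sketch (not part of this interface): with a shift of 0xc, each byte
// in the Software Write Watch table covers one 4 KB page. A hypothetical "mark this
// address dirty" helper, assuming the table is indexed relative to the lowest heap
// address:
//
//   void SoftwareWriteWatchSetDirty(uint8_t* table, uint8_t* lowest_address, void* written_address)
//   {
//       size_t index = ((uint8_t*)written_address - lowest_address)
//                      >> SOFTWARE_WRITE_WATCH_AddressToTableByteIndexShift;
//       table[index] = 1;
//   }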

class Object;
class IGCHeap;
class IGCHandleManager;

#ifdef WRITE_BARRIER_CHECK
//always defined, but should be 0 in Server GC
extern uint8_t* g_GCShadow;
extern uint8_t* g_GCShadowEnd;
// saves the g_lowest_address in between GCs to verify the consistency of the shadow segment
extern uint8_t* g_shadow_lowest_address;
#endif

// Event levels corresponding to events that can be fired by the GC.
enum GCEventLevel
{
    GCEventLevel_None = 0,
    GCEventLevel_Fatal = 1,
    GCEventLevel_Error = 2,
    GCEventLevel_Warning = 3,
    GCEventLevel_Information = 4,
    GCEventLevel_Verbose = 5,
    GCEventLevel_Max = 6,
    GCEventLevel_LogAlways = 255
};

// Event keywords corresponding to events that can be fired by the GC. These
// numbers come from the ETW manifest itself - please make changes to this enum
// if you add, remove, or change keyword sets that are used by the GC!
enum GCEventKeyword
{
    GCEventKeyword_None = 0x0,
    GCEventKeyword_GC = 0x1,
    // Duplicate on purpose, GCPrivate is the same keyword as GC,
    // with a different provider
    GCEventKeyword_GCPrivate = 0x1,
    GCEventKeyword_GCHandle = 0x2,
    GCEventKeyword_GCHandlePrivate = 0x4000,
    GCEventKeyword_GCHeapDump = 0x100000,
    GCEventKeyword_GCSampledObjectAllocationHigh = 0x200000,
    GCEventKeyword_GCHeapSurvivalAndMovement = 0x400000,
    GCEventKeyword_GCHeapCollect = 0x800000,
    GCEventKeyword_GCHeapAndTypeNames = 0x1000000,
    GCEventKeyword_GCSampledObjectAllocationLow = 0x2000000,
    GCEventKeyword_All = GCEventKeyword_GC
      | GCEventKeyword_GCPrivate
      | GCEventKeyword_GCHandle
      | GCEventKeyword_GCHandlePrivate
      | GCEventKeyword_GCHeapDump
      | GCEventKeyword_GCSampledObjectAllocationHigh
      | GCEventKeyword_GCHeapSurvivalAndMovement
      | GCEventKeyword_GCHeapCollect
      | GCEventKeyword_GCHeapAndTypeNames
      | GCEventKeyword_GCSampledObjectAllocationLow
};
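
// Illustrative sketch (not part of this interface): how a GC implementation might
// gate event firing on the keyword/level pair handed to it via ControlEvents
// (declared later in this header). The helper and its exact filtering rules are
// assumptions, not a specification of the event pipeline.
//
//   bool ShouldFireEvent(GCEventKeyword enabledKeywords, GCEventLevel enabledLevel,
//                        GCEventKeyword eventKeyword, GCEventLevel eventLevel)
//   {
//       // Fire when the event's keyword is enabled and its level is at or below the
//       // enabled verbosity; LogAlways events bypass the level filter.
//       bool keywordOn = (enabledKeywords & eventKeyword) != 0;
//       bool levelOn = (eventLevel <= enabledLevel) || (eventLevel == GCEventLevel_LogAlways);
//       return keywordOn && levelOn;
//   }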

// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
enum collection_mode
{
    collection_non_blocking = 0x00000001,
    collection_blocking = 0x00000002,
    collection_optimized = 0x00000004,
    collection_compacting = 0x00000008
#ifdef STRESS_HEAP
    , collection_gcstress = 0x80000000
#endif // STRESS_HEAP
};
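
// Illustrative sketch (not part of this interface): the modes are flags and can be
// combined before being passed to IGCHeap::GarbageCollect (declared later in this
// header). The helper below is hypothetical.
//
//   HRESULT ForceCompactingGen2(IGCHeap* heap)
//   {
//       // Request a blocking, compacting collection of the whole heap.
//       int mode = collection_blocking | collection_compacting;
//       return heap->GarbageCollect(2, false /* low_memory_p */, mode);
//   }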

// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
enum wait_full_gc_status
{
    wait_full_gc_success = 0,
    wait_full_gc_failed = 1,
    wait_full_gc_cancelled = 2,
    wait_full_gc_timeout = 3,
    wait_full_gc_na = 4
};

// !!!!!!!!!!!!!!!!!!!!!!!
// make sure you change the def in bcl\system\gc.cs
// if you change this!
enum start_no_gc_region_status
{
    start_no_gc_success = 0,
    start_no_gc_no_memory = 1,
    start_no_gc_too_large = 2,
    start_no_gc_in_progress = 3
};

enum end_no_gc_region_status
{
    end_no_gc_success = 0,
    end_no_gc_not_in_progress = 1,
    end_no_gc_induced = 2,
    end_no_gc_alloc_exceeded = 3
};

typedef enum
{
    /*
     * WEAK HANDLES
     *
     * Weak handles are handles that track an object as long as it is alive,
     * but do not keep the object alive if there are no strong references to it.
     *
     */

    /*
     * SHORT-LIVED WEAK HANDLES
     *
     * Short-lived weak handles are weak handles that track an object until the
     * first time it is detected to be unreachable. At this point, the handle is
     * severed, even if the object will be visible from a pending finalization
     * graph. This further implies that short weak handles do not track
     * across object resurrections.
     *
     */
    HNDTYPE_WEAK_SHORT = 0,

    /*
     * LONG-LIVED WEAK HANDLES
     *
     * Long-lived weak handles are weak handles that track an object until the
     * object is actually reclaimed. Unlike short weak handles, long weak handles
     * continue to track their referents through finalization and across any
     * resurrections that may occur.
     *
     */
    HNDTYPE_WEAK_LONG = 1,
    HNDTYPE_WEAK_DEFAULT = 1,

    /*
     * STRONG HANDLES
     *
     * Strong handles are handles which function like a normal object reference.
     * The existence of a strong handle for an object will cause the object to
     * be promoted (remain alive) through a garbage collection cycle.
     *
     */
    HNDTYPE_STRONG = 2,
    HNDTYPE_DEFAULT = 2,

    /*
     * PINNED HANDLES
     *
     * Pinned handles are strong handles which have the added property that they
     * prevent an object from moving during a garbage collection cycle. This is
     * useful when passing a pointer to object innards out of the runtime while GC
     * may be enabled.
     *
     * NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
     *       OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
     *       OF HANDLE SHOULD BE USED SPARINGLY!
     */
    HNDTYPE_PINNED = 3,

    /*
     * VARIABLE HANDLES
     *
     * Variable handles are handles whose type can be changed dynamically. They
     * are larger than other types of handles, and are scanned a little more often,
     * but are useful when the handle owner needs an efficient way to change the
     * strength of a handle on the fly.
     *
     */
    HNDTYPE_VARIABLE = 4,

    /*
     * REFCOUNTED HANDLES
     *
     * Refcounted handles are handles that behave as strong handles while the
     * refcount on them is greater than 0 and behave as weak handles otherwise.
     *
     * N.B. These are currently NOT general purpose.
     *      The implementation is tied to COM Interop.
     *
     */
    HNDTYPE_REFCOUNTED = 5,

    /*
     * DEPENDENT HANDLES
     *
     * Dependent handles are two handles that need to have the same lifetime. One handle refers to a secondary object
     * that needs to have the same lifetime as the primary object. The secondary object should not cause the primary
     * object to be referenced, but as long as the primary object is alive, the secondary object must be kept alive too.
     *
     * They are currently used for EnC for adding new field members to existing instantiations under EnC modes where
     * the primary object is the original instantiation and the secondary represents the added field.
     *
     * They are also used to implement the ConditionalWeakTable class in mscorlib.dll. If you want to use
     * these from managed code, they are exposed to the BCL through the managed DependentHandle class.
     *
     *
     */
    HNDTYPE_DEPENDENT = 6,

    /*
     * PINNED HANDLES for asynchronous operation
     *
     * Pinned handles are strong handles which have the added property that they
     * prevent an object from moving during a garbage collection cycle. This is
     * useful when passing a pointer to object innards out of the runtime while GC
     * may be enabled.
     *
     * NOTE: PINNING AN OBJECT IS EXPENSIVE AS IT PREVENTS THE GC FROM ACHIEVING
     *       OPTIMAL PACKING OF OBJECTS DURING EPHEMERAL COLLECTIONS. THIS TYPE
     *       OF HANDLE SHOULD BE USED SPARINGLY!
     */
    HNDTYPE_ASYNCPINNED = 7,

    /*
     * SIZEDREF HANDLES
     *
     * SizedRef handles are strong handles. Each handle has a piece of user data associated
     * with it that stores the size of the object this handle refers to. These handles
     * are scanned as strong roots during each GC, but the size is only calculated during
     * full GCs.
     *
     */
    HNDTYPE_SIZEDREF = 8,

    /*
     * WINRT WEAK HANDLES
     *
     * WinRT weak reference handles hold two different types of weak handles to any
     * RCW with an underlying COM object that implements IWeakReferenceSource. The
     * object reference itself is a short weak handle to the RCW. In addition an
     * IWeakReference* to the underlying COM object is stored, allowing the handle
     * to create a new RCW if the existing RCW is collected. This ensures that any
     * code holding onto a WinRT weak reference can always access an RCW to the
     * underlying COM object as long as it has not been released by all of its strong
     * references.
     */
    HNDTYPE_WEAK_WINRT = 9
} HandleType;

typedef enum
{
    GC_HEAP_INVALID = 0,
    GC_HEAP_WKS = 1,
    GC_HEAP_SVR = 2
} GCHeapType;

typedef bool (* walk_fn)(Object*, void*);
typedef void (* gen_walk_fn)(void* context, int generation, uint8_t* range_start, uint8_t* range_end, uint8_t* range_reserved);
typedef void (* record_surv_fn)(uint8_t* begin, uint8_t* end, ptrdiff_t reloc, void* context, bool compacting_p, bool bgc_p);
typedef void (* fq_walk_fn)(bool, void*);
typedef void (* fq_scan_fn)(Object** ppObject, ScanContext *pSC, uint32_t dwFlags);
typedef void (* handle_scan_fn)(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent);
typedef bool (* async_pin_enum_fn)(Object* object, void* context);

class IGCHandleStore {
public:

    virtual void Uproot() = 0;

    virtual bool ContainsHandle(OBJECTHANDLE handle) = 0;

    virtual OBJECTHANDLE CreateHandleOfType(Object* object, HandleType type) = 0;

    virtual OBJECTHANDLE CreateHandleOfType(Object* object, HandleType type, int heapToAffinitizeTo) = 0;

    virtual OBJECTHANDLE CreateHandleWithExtraInfo(Object* object, HandleType type, void* pExtraInfo) = 0;

    virtual OBJECTHANDLE CreateDependentHandle(Object* primary, Object* secondary) = 0;

    // Relocates async pinned handles from a condemned handle store to the default domain's handle store.
    //
    // The two callbacks are called when:
    // 1. clearIfComplete is called whenever the handle table observes an async pin that is still live.
    //    The callback gives a chance for the EE to unpin the referents if the overlapped operation is complete.
    // 2. setHandle is called whenever the GC has relocated the async pin to a new handle table. The passed-in
    //    handle is the newly-allocated handle in the default domain that should be assigned to the overlapped object.
    virtual void RelocateAsyncPinnedHandles(IGCHandleStore* pTarget, void (*clearIfComplete)(Object*), void (*setHandle)(Object*, OBJECTHANDLE)) = 0;

    virtual bool EnumerateAsyncPinnedHandles(async_pin_enum_fn callback, void* context) = 0;

    virtual ~IGCHandleStore() {};
};

class IGCHandleManager {
public:

    virtual bool Initialize() = 0;

    virtual void Shutdown() = 0;

    virtual void* GetHandleContext(OBJECTHANDLE handle) = 0;

    virtual IGCHandleStore* GetGlobalHandleStore() = 0;

    virtual IGCHandleStore* CreateHandleStore(void* context) = 0;

    virtual void DestroyHandleStore(IGCHandleStore* store) = 0;

    virtual OBJECTHANDLE CreateGlobalHandleOfType(Object* object, HandleType type) = 0;

    virtual OBJECTHANDLE CreateDuplicateHandle(OBJECTHANDLE handle) = 0;

    virtual void DestroyHandleOfType(OBJECTHANDLE handle, HandleType type) = 0;

    virtual void DestroyHandleOfUnknownType(OBJECTHANDLE handle) = 0;

    virtual void SetExtraInfoForHandle(OBJECTHANDLE handle, HandleType type, void* pExtraInfo) = 0;

    virtual void* GetExtraInfoFromHandle(OBJECTHANDLE handle) = 0;

    virtual void StoreObjectInHandle(OBJECTHANDLE handle, Object* object) = 0;

    virtual bool StoreObjectInHandleIfNull(OBJECTHANDLE handle, Object* object) = 0;

    virtual void SetDependentHandleSecondary(OBJECTHANDLE handle, Object* object) = 0;

    virtual Object* GetDependentHandleSecondary(OBJECTHANDLE handle) = 0;

    virtual Object* InterlockedCompareExchangeObjectInHandle(OBJECTHANDLE handle, Object* object, Object* comparandObject) = 0;

    virtual HandleType HandleFetchType(OBJECTHANDLE handle) = 0;

    virtual void TraceRefCountedHandles(HANDLESCANPROC callback, uintptr_t param1, uintptr_t param2) = 0;
};
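
// Illustrative sketch (not part of this interface): typical VM-side use of the
// handle manager to create a dependent handle pairing a key with its
// conditionally-retained value (the ConditionalWeakTable pattern described in
// the HandleType comments above). The helper below is hypothetical.
//
//   OBJECTHANDLE CreateConditionalEntry(IGCHandleManager* mgr, Object* key, Object* value)
//   {
//       IGCHandleStore* store = mgr->GetGlobalHandleStore();
//       OBJECTHANDLE handle = store->CreateDependentHandle(key, value);
//       // The secondary can be swapped later without recreating the handle:
//       //   mgr->SetDependentHandleSecondary(handle, newValue);
//       return handle;
//   }
//
//   // When the entry is removed: mgr->DestroyHandleOfType(handle, HNDTYPE_DEPENDENT);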

// IGCHeap is the interface that the VM will use when interacting with the GC.
class IGCHeap {
public:
    /*
    ===========================================================================
    Hosting APIs. These are used by GC hosting. The code that
    calls these methods may possibly be moved behind the interface -
    today, the VM handles the setting of segment size and max gen 0 size.
    (See src/vm/corehost.cpp)
    ===========================================================================
    */

    // Returns whether or not the given size is a valid segment size.
    virtual bool IsValidSegmentSize(size_t size) = 0;

    // Returns whether or not the given size is a valid gen 0 max size.
    virtual bool IsValidGen0MaxSize(size_t size) = 0;

    // Gets a valid segment size.
    virtual size_t GetValidSegmentSize(bool large_seg = false) = 0;

    // Sets the limit for reserved virtual memory.
    virtual void SetReservedVMLimit(size_t vmlimit) = 0;

    /*
    ===========================================================================
    Concurrent GC routines. These are used in various places in the VM
    to synchronize with the GC, when the VM wants to update something that
    the GC is potentially using, if it's doing a background GC.

    Concrete examples of this are moving async pinned handles across appdomains
    and profiling/ETW scenarios.
    ===========================================================================
    */

    // Blocks until any running concurrent GCs complete.
    virtual void WaitUntilConcurrentGCComplete() = 0;

    // Returns true if a concurrent GC is in progress, false otherwise.
    virtual bool IsConcurrentGCInProgress() = 0;

    // Temporarily enables concurrent GC, used during profiling.
    virtual void TemporaryEnableConcurrentGC() = 0;

    // Temporarily disables concurrent GC, used during profiling.
    virtual void TemporaryDisableConcurrentGC() = 0;

    // Returns whether or not Concurrent GC is enabled.
    virtual bool IsConcurrentGCEnabled() = 0;

    // Waits for a concurrent GC to complete if one is in progress, with the given timeout.
    // Intended for use on native threads; returns a success HRESULT if the wait succeeded,
    // or a failure HRESULT if the wait failed or timed out.
    virtual HRESULT WaitUntilConcurrentGCCompleteAsync(int millisecondsTimeout) = 0;


    /*
    ===========================================================================
    Finalization routines. These are used by the finalizer thread to communicate
    with the GC.
    ===========================================================================
    */

    // Finalizes an app domain by finalizing objects within that app domain.
    virtual bool FinalizeAppDomain(void* pDomain, bool fRunFinalizers) = 0;

    // Finalizes all registered objects for shutdown, even if they are still reachable.
    virtual void SetFinalizeQueueForShutdown(bool fHasLock) = 0;

    // Gets the number of finalizable objects.
    virtual size_t GetNumberOfFinalizable() = 0;

    // Traditionally used by the finalizer thread on shutdown to determine
    // whether or not to time out. Returns true if the GC lock has not been taken.
    virtual bool ShouldRestartFinalizerWatchDog() = 0;

    // Gets the next finalizable object.
    virtual Object* GetNextFinalizable() = 0;

    // Sets whether or not the GC should report all finalizable objects as
    // ready to be finalized, instead of only collectable objects.
    virtual void SetFinalizeRunOnShutdown(bool value) = 0;

    /*
    ===========================================================================
    BCL routines. These are routines that are directly exposed by mscorlib
    as a part of the `System.GC` class. These routines behave in the same
    manner as the functions on `System.GC`.
    ===========================================================================
    */

    // Gets memory-related information:
    // highMemLoadThreshold - physical memory load (as a percentage) at which the GC
    //    starts to react aggressively to reclaim memory.
    // totalPhysicalMem - the total amount of physical memory available on the machine,
    //    or the memory limit set on the container if running in a container.
    // lastRecordedMemLoad - physical memory load (as a percentage) recorded in the last GC.
    // lastRecordedHeapSize - total managed heap size recorded in the last GC.
    // lastRecordedFragmentation - total fragmentation in the managed heap recorded in the last GC.
    virtual void GetMemoryInfo(uint32_t* highMemLoadThreshold,
                               uint64_t* totalPhysicalMem,
                               uint32_t* lastRecordedMemLoad,
                               size_t* lastRecordedHeapSize,
                               size_t* lastRecordedFragmentation) = 0;

    // Gets the current GC latency mode.
    virtual int GetGcLatencyMode() = 0;

    // Sets the current GC latency mode. newLatencyMode has already been
    // verified by mscorlib to be valid.
    virtual int SetGcLatencyMode(int newLatencyMode) = 0;

    // Gets the current LOH compaction mode.
    virtual int GetLOHCompactionMode() = 0;

    // Sets the current LOH compaction mode. newLOHCompactionMode has
    // already been verified by mscorlib to be valid.
    virtual void SetLOHCompactionMode(int newLOHCompactionMode) = 0;

    // Registers for a full GC notification, raising a notification if the gen 2 or
    // LOH object heap thresholds are exceeded.
    virtual bool RegisterForFullGCNotification(uint32_t gen2Percentage, uint32_t lohPercentage) = 0;

    // Cancels a full GC notification that was requested by `RegisterForFullGCNotification`.
    virtual bool CancelFullGCNotification() = 0;

    // Returns the status of a registered notification for determining whether a blocking
    // Gen 2 collection is about to be initiated, with the given timeout.
    virtual int WaitForFullGCApproach(int millisecondsTimeout) = 0;

    // Returns the status of a registered notification for determining whether a blocking
    // Gen 2 collection has completed, with the given timeout.
    virtual int WaitForFullGCComplete(int millisecondsTimeout) = 0;
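
    // Illustrative sketch (not part of this interface): the notification routines above
    // are typically used together in a polling pattern. The helper, thresholds, and
    // timeouts below are hypothetical; the return codes are from wait_full_gc_status.
    //
    //   void WatchForFullGCs(IGCHeap* heap)
    //   {
    //       if (!heap->RegisterForFullGCNotification(10, 10))
    //           return;
    //       if (heap->WaitForFullGCApproach(10000) == wait_full_gc_success)
    //       {
    //           // A blocking gen 2 collection is imminent; shed load here.
    //           heap->WaitForFullGCComplete(10000);
    //       }
    //       heap->CancelFullGCNotification();
    //   }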

    // Returns the generation in which obj is found. Also used by the VM
    // in some places, in particular syncblk code.
    virtual unsigned WhichGeneration(Object* obj) = 0;

    // Returns the number of GCs that have transpired in the given generation
    // since the beginning of the life of the process. Also used by the VM
    // for debug code and app domains.
    virtual int CollectionCount(int generation, int get_bgc_fgc_count = 0) = 0;

    // Begins a no-GC region, returning a code indicating whether entering the no-GC
    // region was successful.
    virtual int StartNoGCRegion(uint64_t totalSize, bool lohSizeKnown, uint64_t lohSize, bool disallowFullBlockingGC) = 0;

    // Exits a no-GC region.
    virtual int EndNoGCRegion() = 0;

    // Gets the total number of bytes in use.
    virtual size_t GetTotalBytesInUse() = 0;

    // Forces a garbage collection of the given generation. Also used extensively
    // throughout the VM.
    virtual HRESULT GarbageCollect(int generation = -1, bool low_memory_p = false, int mode = collection_blocking) = 0;

    // Gets the largest GC generation. Also used extensively throughout the VM.
    virtual unsigned GetMaxGeneration() = 0;

    // Indicates that an object's finalizer should not be run upon the object's collection.
    virtual void SetFinalizationRun(Object* obj) = 0;

    // Indicates that an object's finalizer should be run upon the object's collection.
    virtual bool RegisterForFinalization(int gen, Object* obj) = 0;

    /*
    ===========================================================================
    Miscellaneous routines used by the VM.
    ===========================================================================
    */

    // Initializes the GC heap, returning whether or not the initialization
    // was successful.
    virtual HRESULT Initialize() = 0;

    // Returns whether or not this object was promoted by the last GC.
    virtual bool IsPromoted(Object* object) = 0;

    // Returns true if this pointer points into a GC heap, false otherwise.
    virtual bool IsHeapPointer(void* object, bool small_heap_only = false) = 0;

    // Returns the generation that has been condemned by the current GC.
    virtual unsigned GetCondemnedGeneration() = 0;

    // Returns whether or not a GC is in progress.
    virtual bool IsGCInProgressHelper(bool bConsiderGCStart = false) = 0;

    // Returns the number of GCs that have occurred. Mainly used for
    // sanity checks asserting that a GC has not occurred.
    virtual unsigned GetGcCount() = 0;

    // Gets whether or not the home heap of this alloc context matches the heap
    // associated with this thread.
    virtual bool IsThreadUsingAllocationContextHeap(gc_alloc_context* acontext, int thread_number) = 0;

    // Returns whether or not this object resides in an ephemeral generation.
    virtual bool IsEphemeral(Object* object) = 0;

    // Blocks until a GC is complete, returning a code indicating whether the wait was successful.
    virtual uint32_t WaitUntilGCComplete(bool bConsiderGCStart = false) = 0;

    // "Fixes" an allocation context by binding its allocation pointer to a
    // location on the heap.
    virtual void FixAllocContext(gc_alloc_context* acontext, void* arg, void* heap) = 0;

    // Gets the total survived size plus the total allocated bytes on the heap.
    virtual size_t GetCurrentObjSize() = 0;

    // Sets whether or not a GC is in progress.
    virtual void SetGCInProgress(bool fInProgress) = 0;

    // Gets whether or not the GC runtime structures are in a valid state for heap traversal.
    virtual bool RuntimeStructuresValid() = 0;

    // Tells the GC when the VM is suspending threads.
    virtual void SetSuspensionPending(bool fSuspensionPending) = 0;

    // Tells the GC how many YieldProcessor calls are equal to one scaled yield processor call.
    virtual void SetYieldProcessorScalingFactor(float yieldProcessorScalingFactor) = 0;

    /*
    ============================================================================
    Add/RemoveMemoryPressure support routines. These are on the interface
    for now, but we should move Add/RemoveMemoryPressure from the VM to the GC.
    When that occurs, these three routines can be removed from the interface.
    ============================================================================
    */

    // Gets the timestamp corresponding to the last GC that occurred for the
    // given generation.
    virtual size_t GetLastGCStartTime(int generation) = 0;

    // Gets the duration of the last GC that occurred for the given generation.
    virtual size_t GetLastGCDuration(int generation) = 0;

    // Gets a timestamp for the current moment in time.
    virtual size_t GetNow() = 0;

    /*
    ===========================================================================
    Allocation routines. These all call into the GC's allocator and may trigger a garbage
    collection. All allocation routines return NULL when the allocation request
    couldn't be serviced due to being out of memory.
    ===========================================================================
    */

    // Allocates an object on the given allocation context with the given size and flags.
    // It is the responsibility of the caller to ensure that the passed-in alloc context is
    // owned by the thread that is calling this function. If using per-thread alloc contexts,
    // no lock is needed; callers not using per-thread alloc contexts will need to acquire
    // a lock to ensure that the calling thread has unique ownership over this alloc context.
    virtual Object* Alloc(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;

    // Allocates an object on the large object heap with the given size and flags.
    virtual Object* AllocLHeap(size_t size, uint32_t flags) = 0;

    // Allocates an object on the given allocation context, aligned to 64 bits,
    // with the given size and flags.
    // It is the responsibility of the caller to ensure that the passed-in alloc context is
    // owned by the thread that is calling this function. If using per-thread alloc contexts,
    // no lock is needed; callers not using per-thread alloc contexts will need to acquire
    // a lock to ensure that the calling thread has unique ownership over this alloc context.
    virtual Object* AllocAlign8(gc_alloc_context* acontext, size_t size, uint32_t flags) = 0;

    // This is for the allocator to indicate it's done allocating a large object during a
    // background GC as the BGC threads also need to walk the LOH.
    virtual void PublishObject(uint8_t* obj) = 0;

    // Signals the WaitForGCEvent event, indicating that a GC has completed.
    virtual void SetWaitForGCEvent() = 0;

    // Resets the state of the WaitForGCEvent back to an unsignalled state.
    virtual void ResetWaitForGCEvent() = 0;

    /*
    ===========================================================================
    Heap verification routines. These are used during heap verification only.
    ===========================================================================
    */
    // Returns whether or not this object is in the fixed heap.
    virtual bool IsObjectInFixedHeap(Object* pObj) = 0;

    // Walks an object and validates its members.
    virtual void ValidateObjectMember(Object* obj) = 0;

    // Retrieves the next object after the given object. When the EE
    // is not suspended, the result is not accurate - if the input argument
    // is in Gen0, the function could return zeroed out memory as the next object.
    virtual Object* NextObj(Object* object) = 0;

    // Given an interior pointer, return a pointer to the object
    // containing that pointer. This is safe to call only when the EE is suspended.
    // When fCollectedGenOnly is true, it only returns the object if it's found in
    // the generation(s) that are being collected.
    virtual Object* GetContainingObject(void* pInteriorPtr, bool fCollectedGenOnly) = 0;

    /*
    ===========================================================================
    Profiling routines. Used for event tracing and profiling to broadcast
    information regarding the heap.
    ===========================================================================
    */

    // Walks an object, invoking a callback on each member.
    virtual void DiagWalkObject(Object* obj, walk_fn fn, void* context) = 0;

    // Walks the heap object by object.
    virtual void DiagWalkHeap(walk_fn fn, void* context, int gen_number, bool walk_large_object_heap_p) = 0;

    // Walks the survivors and gets the relocation information if objects have moved.
    virtual void DiagWalkSurvivorsWithType(void* gc_context, record_surv_fn fn, void* diag_context, walk_surv_type type) = 0;

    // Walks the finalization queue.
    virtual void DiagWalkFinalizeQueue(void* gc_context, fq_walk_fn fn) = 0;

    // Scans roots on the finalizer queue. This is a generic function.
    virtual void DiagScanFinalizeQueue(fq_scan_fn fn, ScanContext* context) = 0;

    // Scans handles for profiling or ETW.
    virtual void DiagScanHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;

    // Scans dependent handles for profiling or ETW.
    virtual void DiagScanDependentHandles(handle_scan_fn fn, int gen_number, ScanContext* context) = 0;

    // Describes all generations to the profiler, invoking a callback on each generation.
    virtual void DiagDescrGenerations(gen_walk_fn fn, void* context) = 0;

    // Traces all GC segments and fires ETW events with information on them.
    virtual void DiagTraceGCSegments() = 0;

    /*
    ===========================================================================
    GC Stress routines. Used only when running under GC Stress.
    ===========================================================================
    */

    // Returns TRUE if a GC actually happens, otherwise FALSE. The passed alloc context
    // must not be null.
    virtual bool StressHeap(gc_alloc_context* acontext) = 0;

    /*
    ===========================================================================
    Routines to register read only segments for frozen objects.
    Only valid if FEATURE_BASICFREEZE is defined.
    ===========================================================================
    */

    // Registers a frozen segment with the GC.
    virtual segment_handle RegisterFrozenSegment(segment_info *pseginfo) = 0;

    // Unregisters a frozen segment.
    virtual void UnregisterFrozenSegment(segment_handle seg) = 0;

    /*
    ===========================================================================
    Routines for informing the GC about which events are enabled.
    ===========================================================================
    */

    // Enables or disables the given keyword or level on the default event provider.
    virtual void ControlEvents(GCEventKeyword keyword, GCEventLevel level) = 0;

    // Enables or disables the given keyword or level on the private event provider.
    virtual void ControlPrivateEvents(GCEventKeyword keyword, GCEventLevel level) = 0;

    IGCHeap() {}
    virtual ~IGCHeap() {}
};

#ifdef WRITE_BARRIER_CHECK
void updateGCShadow(Object** ptr, Object* val);
#endif

// Constants for the flags parameter to the GC callbacks.

#define GC_CALL_INTERIOR 0x1
#define GC_CALL_PINNED 0x2
#define GC_CALL_CHECK_APP_DOMAIN 0x4

// Flags for IGCHeap::Alloc(...)
#define GC_ALLOC_FINALIZE 0x1
#define GC_ALLOC_CONTAINS_REF 0x2
#define GC_ALLOC_ALIGN8_BIAS 0x4
#define GC_ALLOC_ALIGN8 0x8

#if defined(USE_CHECKED_OBJECTREFS) && !defined(_NOVM)
#define OBJECTREF_TO_UNCHECKED_OBJECTREF(objref) (*((_UNCHECKED_OBJECTREF*)&(objref)))
#define UNCHECKED_OBJECTREF_TO_OBJECTREF(obj) (OBJECTREF(obj))
#else
#define OBJECTREF_TO_UNCHECKED_OBJECTREF(objref) (objref)
#define UNCHECKED_OBJECTREF_TO_OBJECTREF(obj) (obj)
#endif

struct ScanContext
{
    Thread* thread_under_crawl;
    int thread_number;
    uintptr_t stack_limit; // Lowest point on the thread stack that the scanning logic is permitted to read
    bool promotion; // TRUE: Promotion, FALSE: Relocation.
    bool concurrent; // TRUE: concurrent scanning
#if defined (FEATURE_APPDOMAIN_RESOURCE_MONITORING) || defined (DACCESS_COMPILE)
    AppDomain *pCurrentDomain;
#else
    void* _unused1;
#endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
    void* pMD;
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
    EtwGCRootKind dwEtwRootKind;
#else
    EtwGCRootKind _unused3;
#endif // GC_PROFILING || FEATURE_EVENT_TRACE

    ScanContext()
    {
        LIMITED_METHOD_CONTRACT;

        thread_under_crawl = 0;
        thread_number = -1;
        stack_limit = 0;
        promotion = false;
        concurrent = false;
        pMD = NULL;
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
        dwEtwRootKind = kEtwGCRootKindOther;
#endif
    }
};
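
// Illustrative sketch (not part of this interface): the shape of a root-promotion
// callback matching the promote_func typedef near the top of this header. The body
// is hypothetical; real implementations live inside the GC.
//
//   void PromoteRootCallback(PTR_PTR_Object ppObject, ScanContext* sc, uint32_t flags)
//   {
//       // flags is a combination of the GC_CALL_* constants defined above, e.g.
//       // GC_CALL_INTERIOR for interior pointers and GC_CALL_PINNED for roots that
//       // must not be relocated. sc identifies the thread being scanned and whether
//       // this pass is promotion or relocation.
//       (void)ppObject; (void)sc; (void)flags;
//   }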

// These types are used as part of the loader protocol between the EE
// and the GC.
struct VersionInfo {
    uint32_t MajorVersion;
    uint32_t MinorVersion;
    uint32_t BuildVersion;
    const char* Name;
};

typedef void (*GC_VersionInfoFunction)(
    /* Out */ VersionInfo*
);

typedef HRESULT (*GC_InitializeFunction)(
    /* In  */ IGCToCLR*,
    /* Out */ IGCHeap**,
    /* Out */ IGCHandleManager**,
    /* Out */ GcDacVars*
);
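
// Illustrative sketch (not part of this interface): the loader-side handshake an EE
// might perform against a standalone GC. How the two function pointers are obtained,
// and the helper below, are assumptions rather than part of this header.
//
//   HRESULT LoadStandaloneGC(GC_VersionInfoFunction getVersion, GC_InitializeFunction initialize,
//                            IGCToCLR* gcToClr, IGCHeap** heap, IGCHandleManager** handleMgr,
//                            GcDacVars* dacVars)
//   {
//       VersionInfo info = {};
//       getVersion(&info);
//       // A major version mismatch is a breaking change; minor version differences are
//       // tolerable with care, per the comments at the top of this header.
//       if (info.MajorVersion != GC_INTERFACE_MAJOR_VERSION)
//           return E_FAIL;
//       return initialize(gcToClr, heap, handleMgr, dacVars);
//   }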

#endif // _GC_INTERFACE_H_