1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
/*
 * GCENV.EE.CPP
 *
 * GCToEEInterface implementation
 *
 */
13 | |
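// Suspends the execution engine so a garbage collection can proceed. The debugger is
// notified before and after the suspension; the reason must be one of the GC-related
// suspension reasons asserted below.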
14 | void GCToEEInterface::SuspendEE(SUSPEND_REASON reason) |
15 | { |
16 | WRAPPER_NO_CONTRACT; |
17 | |
18 | static_assert_no_msg(SUSPEND_FOR_GC == (int)ThreadSuspend::SUSPEND_FOR_GC); |
19 | static_assert_no_msg(SUSPEND_FOR_GC_PREP == (int)ThreadSuspend::SUSPEND_FOR_GC_PREP); |
20 | |
21 | _ASSERTE(reason == SUSPEND_FOR_GC || reason == SUSPEND_FOR_GC_PREP); |
22 | |
23 | g_pDebugInterface->SuspendForGarbageCollectionStarted(); |
24 | |
25 | ThreadSuspend::SuspendEE((ThreadSuspend::SUSPEND_REASON)reason); |
26 | |
27 | g_pDebugInterface->SuspendForGarbageCollectionCompleted(); |
28 | } |
29 | |
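// Restarts the execution engine after a GC-induced suspension. The debugger is notified,
// and bFinishedGC is forwarded to ThreadSuspend::RestartEE to indicate whether the GC
// has finished its work.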
30 | void GCToEEInterface::RestartEE(bool bFinishedGC) |
31 | { |
32 | WRAPPER_NO_CONTRACT; |
33 | |
34 | g_pDebugInterface->ResumeForGarbageCollectionStarted(); |
35 | |
36 | ThreadSuspend::RestartEE(bFinishedGC, TRUE); |
37 | } |
38 | |
39 | VOID GCToEEInterface::SyncBlockCacheWeakPtrScan(HANDLESCANPROC scanProc, uintptr_t lp1, uintptr_t lp2) |
40 | { |
41 | CONTRACTL |
42 | { |
43 | NOTHROW; |
44 | GC_NOTRIGGER; |
45 | } |
46 | CONTRACTL_END; |
47 | |
48 | SyncBlockCache::GetSyncBlockCache()->GCWeakPtrScan(scanProc, lp1, lp2); |
49 | } |
50 | |
// The EE can perform post-stack-scanning actions while the
// user threads are still suspended.
53 | VOID GCToEEInterface::AfterGcScanRoots (int condemned, int max_gen, |
54 | ScanContext* sc) |
55 | { |
56 | CONTRACTL |
57 | { |
58 | NOTHROW; |
59 | GC_NOTRIGGER; |
60 | } |
61 | CONTRACTL_END; |
62 | |
63 | #ifdef FEATURE_COMINTEROP |
64 | // Go through all the app domains and for each one detach all the *unmarked* RCWs to prevent |
65 | // the RCW cache from resurrecting them. |
66 | UnsafeAppDomainIterator i(TRUE); |
67 | i.Init(); |
68 | |
69 | while (i.Next()) |
70 | { |
71 | i.GetDomain()->DetachRCWs(); |
72 | } |
73 | #endif // FEATURE_COMINTEROP |
74 | } |
75 | |
76 | /* |
77 | * Scan all stack roots |
78 | */ |
79 | |
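// Reports the GC roots on a single thread's stack to the supplied promote callback.
// Depending on configuration this is either a precise stack walk (StackWalkFrames with
// GcStackCrawlCallBack) or, under FEATURE_CONSERVATIVE_GC, a conservative scan that
// reports every plausible pointer on the stack as a pinned interior reference.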
80 | static void ScanStackRoots(Thread * pThread, promote_func* fn, ScanContext* sc) |
81 | { |
82 | GCCONTEXT gcctx; |
83 | |
84 | gcctx.f = fn; |
85 | gcctx.sc = sc; |
86 | gcctx.cf = NULL; |
87 | |
88 | ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE(); |
89 | |
90 | // Either we are in a concurrent situation (in which case the thread is unknown to |
91 | // us), or we are performing a synchronous GC and we are the GC thread, holding |
92 | // the threadstore lock. |
93 | |
94 | _ASSERTE(dbgOnly_IsSpecialEEThread() || |
95 | GetThread() == NULL || |
96 | // this is for background GC threads which always call this when EE is suspended. |
97 | IsGCSpecialThread() || |
98 | (GetThread() == ThreadSuspend::GetSuspensionThread() && ThreadStore::HoldingThreadStore())); |
99 | |
100 | pThread->SetHasPromotedBytes(); |
101 | |
102 | Frame* pTopFrame = pThread->GetFrame(); |
103 | Object ** topStack = (Object **)pTopFrame; |
104 | if ((pTopFrame != ((Frame*)-1)) |
105 | && (pTopFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) { |
106 | // It is an InlinedCallFrame. Get SP from it. |
107 | InlinedCallFrame* pInlinedFrame = (InlinedCallFrame*)pTopFrame; |
108 | topStack = (Object **)pInlinedFrame->GetCallSiteSP(); |
109 | } |
110 | |
111 | sc->stack_limit = (uintptr_t)topStack; |
112 | |
113 | #ifdef FEATURE_CONSERVATIVE_GC |
114 | if (g_pConfig->GetGCConservative()) |
115 | { |
116 | // Conservative stack root reporting |
// We will treat everything on the stack as a pinned interior GC pointer.
// Since we report everything as pinned, we don't need to run the following code during the relocation phase.
119 | if (sc->promotion) |
120 | { |
121 | Object ** bottomStack = (Object **) pThread->GetCachedStackBase(); |
122 | Object ** walk; |
123 | for (walk = topStack; walk < bottomStack; walk ++) |
124 | { |
125 | if (((void*)*walk > (void*)bottomStack || (void*)*walk < (void*)topStack) && |
126 | ((void*)*walk >= (void*)g_lowest_address && (void*)*walk <= (void*)g_highest_address) |
127 | ) |
128 | { |
129 | //DbgPrintf("promote " FMT_ADDR " : " FMT_ADDR "\n", walk, *walk); |
130 | fn(walk, sc, GC_CALL_INTERIOR|GC_CALL_PINNED); |
131 | } |
132 | } |
133 | } |
134 | |
// Also ask the explicit Frames to report any references they might know about.
// Generally these will be a subset of the objects reported by the conservative scan
// above, but nothing guarantees that; in particular, a GC protect frame may protect
// references that live at a lower address than the frame itself and therefore escape
// the stack range we scanned.
140 | Frame *pFrame = pThread->GetFrame(); |
141 | while (pFrame != FRAME_TOP) |
142 | { |
143 | pFrame->GcScanRoots(fn, sc); |
144 | pFrame = pFrame->PtrNextFrame(); |
145 | } |
146 | } |
147 | else |
148 | #endif |
149 | { |
150 | unsigned flagsStackWalk = ALLOW_ASYNC_STACK_WALK | ALLOW_INVALID_OBJECTS; |
151 | #if defined(WIN64EXCEPTIONS) |
152 | flagsStackWalk |= GC_FUNCLET_REFERENCE_REPORTING; |
153 | #endif // defined(WIN64EXCEPTIONS) |
154 | pThread->StackWalkFrames( GcStackCrawlCallBack, &gcctx, flagsStackWalk); |
155 | } |
156 | } |
157 | |
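// Scans non-handle roots for this GC: statics (when the max generation is condemned
// during promotion, with server GC heaps competing for the work) and the stacks of
// the threads whose allocation context is served by the calling GC heap.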
158 | void GCToEEInterface::GcScanRoots(promote_func* fn, int condemned, int max_gen, ScanContext* sc) |
159 | { |
160 | STRESS_LOG1(LF_GCROOTS, LL_INFO10, "GCScan: Promotion Phase = %d\n" , sc->promotion); |
161 | |
162 | // In server GC, we should be competing for marking the statics |
163 | if (GCHeapUtilities::MarkShouldCompeteForStatics()) |
164 | { |
165 | if (condemned == max_gen && sc->promotion) |
166 | { |
167 | SystemDomain::EnumAllStaticGCRefs(fn, sc); |
168 | } |
169 | } |
170 | |
171 | Thread* pThread = NULL; |
172 | while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL) |
173 | { |
174 | STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "{ Starting scan of Thread %p ID = %x\n" , pThread, pThread->GetThreadId()); |
175 | |
176 | if (GCHeapUtilities::GetGCHeap()->IsThreadUsingAllocationContextHeap( |
177 | pThread->GetAllocContext(), sc->thread_number)) |
178 | { |
179 | sc->thread_under_crawl = pThread; |
180 | #ifdef FEATURE_EVENT_TRACE |
181 | sc->dwEtwRootKind = kEtwGCRootKindStack; |
182 | #endif // FEATURE_EVENT_TRACE |
183 | ScanStackRoots(pThread, fn, sc); |
184 | #ifdef FEATURE_EVENT_TRACE |
185 | sc->dwEtwRootKind = kEtwGCRootKindOther; |
186 | #endif // FEATURE_EVENT_TRACE |
187 | } |
188 | STRESS_LOG2(LF_GC | LF_GCROOTS, LL_INFO100, "Ending scan of Thread %p ID = 0x%x }\n" , pThread, pThread->GetThreadId()); |
189 | } |
190 | } |
191 | |
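// Called at the start of a collection while the EE is suspended. Gives the EE a chance
// to do per-GC bookkeeping: byref validation, code heap cleanup, ETW type-system log
// cleanup, COM interop (Jupiter) notification, and thread-store accounting for full GCs.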
192 | void GCToEEInterface::GcStartWork (int condemned, int max_gen) |
193 | { |
194 | CONTRACTL |
195 | { |
196 | NOTHROW; |
197 | GC_NOTRIGGER; |
198 | } |
199 | CONTRACTL_END; |
200 | |
201 | #ifdef VERIFY_HEAP |
202 | // Validate byrefs pinned by IL stubs since the last GC. |
203 | StubHelpers::ProcessByrefValidationList(); |
204 | #endif // VERIFY_HEAP |
205 | |
206 | ExecutionManager::CleanupCodeHeaps(); |
207 | |
208 | #ifdef FEATURE_EVENT_TRACE |
209 | ETW::TypeSystemLog::Cleanup(); |
210 | #endif |
211 | |
212 | #ifdef FEATURE_COMINTEROP |
213 | // |
// Let the GC detect managed/native cycles with input from Jupiter.
// Jupiter will:
// 1. Report references from RCWs to CCWs based on native references in Jupiter
// 2. Identify the subset of CCWs that need to be rooted
218 | // |
219 | // We'll build the references from RCW to CCW using |
220 | // 1. Preallocated arrays |
221 | // 2. Dependent handles |
222 | // |
223 | RCWWalker::OnGCStarted(condemned); |
224 | #endif // FEATURE_COMINTEROP |
225 | |
226 | if (condemned == max_gen) |
227 | { |
228 | ThreadStore::s_pThreadStore->OnMaxGenerationGCStarted(); |
229 | } |
230 | } |
231 | |
232 | void GCToEEInterface::GcDone(int condemned) |
233 | { |
234 | CONTRACTL |
235 | { |
236 | NOTHROW; |
237 | GC_NOTRIGGER; |
238 | } |
239 | CONTRACTL_END; |
240 | |
241 | #ifdef FEATURE_COMINTEROP |
242 | // |
243 | // Tell Jupiter GC has finished |
244 | // |
245 | RCWWalker::OnGCFinished(condemned); |
246 | #endif // FEATURE_COMINTEROP |
247 | } |
248 | |
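// Decides whether an object referenced from a ref-counted handle should be reported
// as live. With COM interop enabled this is the case while the object's CCW is still
// active; otherwise the handle does not keep the object alive.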
249 | bool GCToEEInterface::RefCountedHandleCallbacks(Object * pObject) |
250 | { |
251 | CONTRACTL |
252 | { |
253 | NOTHROW; |
254 | GC_NOTRIGGER; |
255 | } |
256 | CONTRACTL_END; |
257 | |
258 | #ifdef FEATURE_COMINTEROP |
259 | //<REVISIT_TODO>@todo optimize the access to the ref-count |
260 | ComCallWrapper* pWrap = ComCallWrapper::GetWrapperForObject((OBJECTREF)pObject); |
261 | |
262 | return pWrap != NULL && pWrap->IsWrapperActive(); |
263 | #else |
264 | return false; |
265 | #endif |
266 | } |
267 | |
268 | void GCToEEInterface::GcBeforeBGCSweepWork() |
269 | { |
270 | CONTRACTL |
271 | { |
272 | NOTHROW; |
273 | GC_NOTRIGGER; |
274 | } |
275 | CONTRACTL_END; |
276 | |
277 | #ifdef VERIFY_HEAP |
278 | // Validate byrefs pinned by IL stubs since the last GC. |
279 | StubHelpers::ProcessByrefValidationList(); |
280 | #endif // VERIFY_HEAP |
281 | } |
282 | |
283 | void GCToEEInterface::SyncBlockCacheDemote(int max_gen) |
284 | { |
285 | CONTRACTL |
286 | { |
287 | NOTHROW; |
288 | GC_NOTRIGGER; |
289 | } |
290 | CONTRACTL_END; |
291 | |
292 | SyncBlockCache::GetSyncBlockCache()->GCDone(TRUE, max_gen); |
293 | } |
294 | |
295 | void GCToEEInterface::SyncBlockCachePromotionsGranted(int max_gen) |
296 | { |
297 | CONTRACTL |
298 | { |
299 | NOTHROW; |
300 | GC_NOTRIGGER; |
301 | } |
302 | CONTRACTL_END; |
303 | |
304 | SyncBlockCache::GetSyncBlockCache()->GCDone(FALSE, max_gen); |
305 | } |
306 | |
307 | uint32_t GCToEEInterface::GetActiveSyncBlockCount() |
308 | { |
309 | CONTRACTL |
310 | { |
311 | NOTHROW; |
312 | GC_NOTRIGGER; |
313 | } |
314 | CONTRACTL_END; |
315 | |
316 | return SyncBlockCache::GetSyncBlockCache()->GetActiveCount(); |
317 | } |
318 | |
319 | gc_alloc_context * GCToEEInterface::GetAllocContext() |
320 | { |
321 | WRAPPER_NO_CONTRACT; |
322 | |
323 | Thread* pThread = ::GetThread(); |
324 | if (!pThread) |
325 | { |
326 | return nullptr; |
327 | } |
328 | |
329 | return pThread->GetAllocContext(); |
330 | } |
331 | |
332 | void GCToEEInterface::GcEnumAllocContexts(enum_alloc_context_func* fn, void* param) |
333 | { |
334 | CONTRACTL |
335 | { |
336 | NOTHROW; |
337 | GC_NOTRIGGER; |
338 | } |
339 | CONTRACTL_END; |
340 | |
341 | if (GCHeapUtilities::UseThreadAllocationContexts()) |
342 | { |
343 | Thread * pThread = NULL; |
344 | while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL) |
345 | { |
346 | fn(pThread->GetAllocContext(), param); |
347 | } |
348 | } |
349 | else |
350 | { |
351 | fn(&g_global_alloc_context, param); |
352 | } |
353 | } |
354 | |
355 | |
356 | uint8_t* GCToEEInterface::GetLoaderAllocatorObjectForGC(Object* pObject) |
357 | { |
358 | CONTRACTL |
359 | { |
360 | NOTHROW; |
361 | GC_NOTRIGGER; |
362 | } |
363 | CONTRACTL_END; |
364 | |
365 | return pObject->GetGCSafeMethodTable()->GetLoaderAllocatorObjectForGC(); |
366 | } |
367 | |
368 | bool GCToEEInterface::IsPreemptiveGCDisabled() |
369 | { |
370 | WRAPPER_NO_CONTRACT; |
371 | |
372 | Thread* pThread = ::GetThread(); |
373 | if (pThread) |
374 | { |
375 | return !!pThread->PreemptiveGCDisabled(); |
376 | } |
377 | |
378 | return false; |
379 | } |
380 | |
381 | bool GCToEEInterface::EnablePreemptiveGC() |
382 | { |
383 | WRAPPER_NO_CONTRACT; |
384 | |
385 | bool bToggleGC = false; |
386 | Thread* pThread = ::GetThread(); |
387 | |
388 | if (pThread) |
389 | { |
390 | bToggleGC = !!pThread->PreemptiveGCDisabled(); |
391 | if (bToggleGC) |
392 | { |
393 | pThread->EnablePreemptiveGC(); |
394 | } |
395 | } |
396 | |
397 | return bToggleGC; |
398 | } |
399 | |
400 | void GCToEEInterface::DisablePreemptiveGC() |
401 | { |
402 | WRAPPER_NO_CONTRACT; |
403 | |
404 | Thread* pThread = ::GetThread(); |
405 | if (pThread) |
406 | { |
407 | pThread->DisablePreemptiveGC(); |
408 | } |
409 | } |
410 | |
411 | Thread* GCToEEInterface::GetThread() |
412 | { |
413 | WRAPPER_NO_CONTRACT; |
414 | |
415 | return ::GetThread(); |
416 | } |
417 | |
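// Argument block handed to BackgroundThreadStub on a newly created background GC thread.
// The creating thread waits on threadStartedEvent, so the stub must not touch this block
// after setting the event.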
418 | struct BackgroundThreadStubArgs |
419 | { |
420 | Thread* thread; |
421 | GCBackgroundThreadFunction threadStart; |
422 | void* arg; |
423 | CLREvent threadStartedEvent; |
424 | bool hasStarted; |
425 | }; |
426 | |
427 | DWORD WINAPI BackgroundThreadStub(void* arg) |
428 | { |
429 | BackgroundThreadStubArgs* stubArgs = (BackgroundThreadStubArgs*)arg; |
430 | assert (stubArgs->thread != NULL); |
431 | |
432 | ClrFlsSetThreadType (ThreadType_GC); |
433 | stubArgs->thread->SetGCSpecial(true); |
434 | STRESS_LOG_RESERVE_MEM (GC_STRESSLOG_MULTIPLY); |
435 | |
436 | stubArgs->hasStarted = !!stubArgs->thread->HasStarted(FALSE); |
437 | |
438 | Thread* thread = stubArgs->thread; |
439 | GCBackgroundThreadFunction realThreadStart = stubArgs->threadStart; |
440 | void* realThreadArg = stubArgs->arg; |
441 | bool hasStarted = stubArgs->hasStarted; |
442 | |
443 | stubArgs->threadStartedEvent.Set(); |
444 | // The stubArgs cannot be used once the event is set, since that releases wait on the |
445 | // event in the function that created this thread and the stubArgs go out of scope. |
446 | |
447 | DWORD result = 0; |
448 | |
449 | if (hasStarted) |
450 | { |
451 | result = realThreadStart(realThreadArg); |
452 | DestroyThread(thread); |
453 | } |
454 | |
455 | return result; |
456 | } |
457 | |
458 | // |
459 | // Diagnostics code |
460 | // |
461 | |
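// Returns TRUE when either the profiling API or ETW has asked to be notified about
// object movement, in which case the GC's survivor walks below are hooked up.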
462 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
463 | inline BOOL ShouldTrackMovementForProfilerOrEtw() |
464 | { |
465 | #ifdef GC_PROFILING |
466 | if (CORProfilerTrackGC()) |
467 | return true; |
468 | #endif |
469 | |
470 | #ifdef FEATURE_EVENT_TRACE |
471 | if (ETW::GCLog::ShouldTrackMovementForEtw()) |
472 | return true; |
473 | #endif |
474 | |
475 | return false; |
476 | } |
477 | #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
478 | |
479 | void ProfScanRootsHelper(Object** ppObject, ScanContext *pSC, uint32_t dwFlags) |
480 | { |
481 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
482 | Object *pObj = *ppObject; |
483 | if (dwFlags & GC_CALL_INTERIOR) |
484 | { |
485 | pObj = GCHeapUtilities::GetGCHeap()->GetContainingObject(pObj, true); |
486 | if (pObj == nullptr) |
487 | return; |
488 | } |
489 | ScanRootsHelper(pObj, ppObject, pSC, dwFlags); |
490 | #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
491 | } |
492 | |
493 | // TODO - at some point we would like to completely decouple profiling |
494 | // from ETW tracing using a pattern similar to this, where the |
495 | // ProfilingScanContext has flags about whether or not certain things |
496 | // should be tracked, and each one of these ProfilerShouldXYZ functions |
497 | // will check these flags and determine what to do based upon that. |
498 | // GCProfileWalkHeapWorker can, in turn, call those methods without fear |
499 | // of things being ifdef'd out. |
500 | |
501 | // Returns TRUE if GC profiling is enabled and the profiler |
502 | // should scan dependent handles, FALSE otherwise. |
503 | BOOL ProfilerShouldTrackConditionalWeakTableElements() |
504 | { |
505 | #if defined(GC_PROFILING) |
506 | return CORProfilerTrackConditionalWeakTableElements(); |
507 | #else |
508 | return FALSE; |
509 | #endif // defined (GC_PROFILING) |
510 | } |
511 | |
512 | // If GC profiling is enabled, informs the profiler that we are done |
513 | // tracing dependent handles. |
514 | void ProfilerEndConditionalWeakTableElementReferences(void* heapId) |
515 | { |
516 | #if defined (GC_PROFILING) |
517 | g_profControlBlock.pProfInterface->EndConditionalWeakTableElementReferences(heapId); |
518 | #else |
519 | UNREFERENCED_PARAMETER(heapId); |
520 | #endif // defined (GC_PROFILING) |
521 | } |
522 | |
523 | // If GC profiling is enabled, informs the profiler that we are done |
524 | // tracing root references. |
525 | void ProfilerEndRootReferences2(void* heapId) |
526 | { |
527 | #if defined (GC_PROFILING) |
528 | g_profControlBlock.pProfInterface->EndRootReferences2(heapId); |
529 | #else |
530 | UNREFERENCED_PARAMETER(heapId); |
531 | #endif // defined (GC_PROFILING) |
532 | } |
533 | |
534 | void GcScanRootsForProfilerAndETW(promote_func* fn, int condemned, int max_gen, ScanContext* sc) |
535 | { |
536 | Thread* pThread = NULL; |
537 | while ((pThread = ThreadStore::GetThreadList(pThread)) != NULL) |
538 | { |
539 | sc->thread_under_crawl = pThread; |
540 | #ifdef FEATURE_EVENT_TRACE |
541 | sc->dwEtwRootKind = kEtwGCRootKindStack; |
542 | #endif // FEATURE_EVENT_TRACE |
543 | ScanStackRoots(pThread, fn, sc); |
544 | #ifdef FEATURE_EVENT_TRACE |
545 | sc->dwEtwRootKind = kEtwGCRootKindOther; |
546 | #endif // FEATURE_EVENT_TRACE |
547 | } |
548 | } |
549 | |
550 | void ScanHandleForProfilerAndETW(Object** pRef, Object* pSec, uint32_t flags, ScanContext* context, bool isDependent) |
551 | { |
552 | ProfilingScanContext* pSC = (ProfilingScanContext*)context; |
553 | |
554 | #ifdef GC_PROFILING |
555 | // Give the profiler the objectref. |
556 | if (pSC->fProfilerPinned) |
557 | { |
558 | if (!isDependent) |
559 | { |
560 | BEGIN_PIN_PROFILER(CORProfilerTrackGC()); |
561 | g_profControlBlock.pProfInterface->RootReference2( |
562 | (uint8_t *)*pRef, |
563 | kEtwGCRootKindHandle, |
564 | (EtwGCRootFlags)flags, |
565 | pRef, |
566 | &pSC->pHeapId); |
567 | END_PIN_PROFILER(); |
568 | } |
569 | else |
570 | { |
571 | BEGIN_PIN_PROFILER(CORProfilerTrackConditionalWeakTableElements()); |
572 | g_profControlBlock.pProfInterface->ConditionalWeakTableElementReference( |
573 | (uint8_t*)*pRef, |
574 | (uint8_t*)pSec, |
575 | pRef, |
576 | &pSC->pHeapId); |
577 | END_PIN_PROFILER(); |
578 | } |
579 | } |
580 | #endif // GC_PROFILING |
581 | |
582 | #if defined(FEATURE_EVENT_TRACE) |
583 | // Notify ETW of the handle |
584 | if (ETW::GCLog::ShouldWalkHeapRootsForEtw()) |
585 | { |
586 | ETW::GCLog::RootReference( |
587 | pRef, |
588 | *pRef, // object being rooted |
589 | pSec, // pSecondaryNodeForDependentHandle |
590 | isDependent, |
591 | pSC, |
592 | 0, // dwGCFlags, |
593 | flags); // ETW handle flags |
594 | } |
595 | #endif // defined(FEATURE_EVENT_TRACE) |
596 | } |
597 | |
598 | // This is called only if we've determined that either: |
599 | // a) The Profiling API wants to do a walk of the heap, and it has pinned the |
600 | // profiler in place (so it cannot be detached), and it's thus safe to call into the |
601 | // profiler, OR |
602 | // b) ETW infrastructure wants to do a walk of the heap either to log roots, |
603 | // objects, or both. |
// This can also be called to do a single walk for BOTH a) and b) simultaneously. Since
// ETW can ask for roots but not objects (or vice versa), each part of the walk below is
// gated on the flags that actually requested it.
606 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
607 | void GCProfileWalkHeapWorker(BOOL fProfilerPinned, BOOL fShouldWalkHeapRootsForEtw, BOOL fShouldWalkHeapObjectsForEtw) |
608 | { |
609 | { |
610 | ProfilingScanContext SC(fProfilerPinned); |
611 | unsigned max_generation = GCHeapUtilities::GetGCHeap()->GetMaxGeneration(); |
612 | |
613 | // **** Scan roots: Only scan roots if profiling API wants them or ETW wants them. |
614 | if (fProfilerPinned || fShouldWalkHeapRootsForEtw) |
615 | { |
616 | GcScanRootsForProfilerAndETW(&ProfScanRootsHelper, max_generation, max_generation, &SC); |
617 | SC.dwEtwRootKind = kEtwGCRootKindFinalizer; |
618 | GCHeapUtilities::GetGCHeap()->DiagScanFinalizeQueue(&ProfScanRootsHelper, &SC); |
619 | |
620 | // Handles are kept independent of wks/svr/concurrent builds |
621 | SC.dwEtwRootKind = kEtwGCRootKindHandle; |
622 | GCHeapUtilities::GetGCHeap()->DiagScanHandles(&ScanHandleForProfilerAndETW, max_generation, &SC); |
623 | |
624 | // indicate that regular handle scanning is over, so we can flush the buffered roots |
625 | // to the profiler. (This is for profapi only. ETW will flush after the |
// entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
627 | if (fProfilerPinned) |
628 | { |
629 | ProfilerEndRootReferences2(&SC.pHeapId); |
630 | } |
631 | } |
632 | |
633 | // **** Scan dependent handles: only if the profiler supports it or ETW wants roots |
634 | if ((fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) || |
635 | fShouldWalkHeapRootsForEtw) |
636 | { |
637 | // GcScanDependentHandlesForProfiler double-checks |
638 | // CORProfilerTrackConditionalWeakTableElements() before calling into the profiler |
639 | |
640 | ProfilingScanContext* pSC = &SC; |
641 | |
// We'll re-use pHeapId, which is either still unused (0) or was set to -1 when
// EndRootReferences2 freed it above, so reset it to NULL.
644 | _ASSERTE((*((size_t *)(&pSC->pHeapId)) == (size_t)(-1)) || |
645 | (*((size_t *)(&pSC->pHeapId)) == (size_t)(0))); |
646 | pSC->pHeapId = NULL; |
647 | |
648 | GCHeapUtilities::GetGCHeap()->DiagScanDependentHandles(&ScanHandleForProfilerAndETW, max_generation, &SC); |
649 | |
650 | // indicate that dependent handle scanning is over, so we can flush the buffered roots |
651 | // to the profiler. (This is for profapi only. ETW will flush after the |
// entire heap walk is complete, via ETW::GCLog::EndHeapDump.)
653 | if (fProfilerPinned && ProfilerShouldTrackConditionalWeakTableElements()) |
654 | { |
655 | ProfilerEndConditionalWeakTableElementReferences(&SC.pHeapId); |
656 | } |
657 | } |
658 | |
659 | ProfilerWalkHeapContext profilerWalkHeapContext(fProfilerPinned, SC.pvEtwContext); |
660 | |
661 | // **** Walk objects on heap: only if profiling API wants them or ETW wants them. |
662 | if (fProfilerPinned || fShouldWalkHeapObjectsForEtw) |
663 | { |
664 | GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&HeapWalkHelper, &profilerWalkHeapContext, max_generation, true /* walk the large object heap */); |
665 | } |
666 | |
667 | #ifdef FEATURE_EVENT_TRACE |
668 | // **** Done! Indicate to ETW helpers that the heap walk is done, so any buffers |
669 | // should be flushed into the ETW stream |
670 | if (fShouldWalkHeapObjectsForEtw || fShouldWalkHeapRootsForEtw) |
671 | { |
672 | ETW::GCLog::EndHeapDump(&profilerWalkHeapContext); |
673 | } |
674 | #endif // FEATURE_EVENT_TRACE |
675 | } |
676 | } |
677 | #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
678 | |
679 | void GCProfileWalkHeap() |
680 | { |
681 | BOOL fWalkedHeapForProfiler = FALSE; |
682 | |
683 | #ifdef FEATURE_EVENT_TRACE |
684 | if (ETW::GCLog::ShouldWalkStaticsAndCOMForEtw()) |
685 | ETW::GCLog::WalkStaticsAndCOMForETW(); |
686 | |
687 | BOOL fShouldWalkHeapRootsForEtw = ETW::GCLog::ShouldWalkHeapRootsForEtw(); |
688 | BOOL fShouldWalkHeapObjectsForEtw = ETW::GCLog::ShouldWalkHeapObjectsForEtw(); |
689 | #else // !FEATURE_EVENT_TRACE |
690 | BOOL fShouldWalkHeapRootsForEtw = FALSE; |
691 | BOOL fShouldWalkHeapObjectsForEtw = FALSE; |
692 | #endif // FEATURE_EVENT_TRACE |
693 | |
694 | #if defined (GC_PROFILING) |
695 | { |
696 | BEGIN_PIN_PROFILER(CORProfilerTrackGC()); |
697 | GCProfileWalkHeapWorker(TRUE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw); |
698 | fWalkedHeapForProfiler = TRUE; |
699 | END_PIN_PROFILER(); |
700 | } |
701 | #endif // defined (GC_PROFILING) |
702 | |
703 | #if defined (GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
704 | // we need to walk the heap if one of GC_PROFILING or FEATURE_EVENT_TRACE |
705 | // is defined, since both of them make use of the walk heap worker. |
706 | if (!fWalkedHeapForProfiler && |
707 | (fShouldWalkHeapRootsForEtw || fShouldWalkHeapObjectsForEtw)) |
708 | { |
709 | GCProfileWalkHeapWorker(FALSE /* fProfilerPinned */, fShouldWalkHeapRootsForEtw, fShouldWalkHeapObjectsForEtw); |
710 | } |
711 | #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
712 | } |
713 | |
714 | void WalkFReachableObjects(bool isCritical, void* objectID) |
715 | { |
716 | g_profControlBlock.pProfInterface->FinalizeableObjectQueued(isCritical, (ObjectID)objectID); |
717 | } |
718 | |
719 | static fq_walk_fn g_FQWalkFn = &WalkFReachableObjects; |
720 | |
721 | void GCToEEInterface::DiagGCStart(int gen, bool isInduced) |
722 | { |
723 | #ifdef GC_PROFILING |
724 | DiagUpdateGenerationBounds(); |
725 | GarbageCollectionStartedCallback(gen, isInduced); |
726 | { |
727 | BEGIN_PIN_PROFILER(CORProfilerTrackGC()); |
728 | size_t context = 0; |
729 | |
// When we're walking objects allocated by class, we don't want to walk the large
// object heap, because doing so would count objects that may have been around for a while.
732 | GCHeapUtilities::GetGCHeap()->DiagWalkHeap(&AllocByClassHelper, (void *)&context, 0, false); |
733 | |
734 | // Notify that we've reached the end of the Gen 0 scan |
735 | g_profControlBlock.pProfInterface->EndAllocByClass(&context); |
736 | END_PIN_PROFILER(); |
737 | } |
738 | |
739 | #endif // GC_PROFILING |
740 | } |
741 | |
742 | void GCToEEInterface::DiagUpdateGenerationBounds() |
743 | { |
744 | #ifdef GC_PROFILING |
745 | if (CORProfilerTrackGC()) |
746 | UpdateGenerationBounds(); |
747 | #endif // GC_PROFILING |
748 | } |
749 | |
750 | void GCToEEInterface::DiagGCEnd(size_t index, int gen, int reason, bool fConcurrent) |
751 | { |
752 | #ifdef GC_PROFILING |
753 | if (!fConcurrent) |
754 | { |
755 | GCProfileWalkHeap(); |
756 | DiagUpdateGenerationBounds(); |
757 | GarbageCollectionFinishedCallback(); |
758 | } |
759 | #endif // GC_PROFILING |
760 | } |
761 | |
762 | void GCToEEInterface::DiagWalkFReachableObjects(void* gcContext) |
763 | { |
764 | #ifdef GC_PROFILING |
765 | if (CORProfilerTrackGC()) |
766 | { |
767 | BEGIN_PIN_PROFILER(CORProfilerPresent()); |
768 | GCHeapUtilities::GetGCHeap()->DiagWalkFinalizeQueue(gcContext, g_FQWalkFn); |
769 | END_PIN_PROFILER(); |
770 | } |
771 | #endif //GC_PROFILING |
772 | } |
773 | |
774 | // Note on last parameter: when calling this for bgc, only ETW |
775 | // should be sending these events so that existing profapi profilers |
776 | // don't get confused. |
777 | void WalkMovedReferences(uint8_t* begin, uint8_t* end, |
778 | ptrdiff_t reloc, |
779 | void* context, |
780 | bool fCompacting, |
781 | bool fBGC) |
782 | { |
783 | ETW::GCLog::MovedReference(begin, end, |
784 | (fCompacting ? reloc : 0), |
785 | (size_t)context, |
786 | fCompacting, |
787 | !fBGC); |
788 | } |
789 | |
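// The three DiagWalk*Survivors methods below follow the same pattern: if a profiler or
// ETW wants movement information, wrap the heap's survivor walk in a BeginMovedReferences /
// EndMovedReferences pair so buffered movement events are flushed when the walk completes.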
790 | void GCToEEInterface::DiagWalkSurvivors(void* gcContext) |
791 | { |
792 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
793 | if (ShouldTrackMovementForProfilerOrEtw()) |
794 | { |
795 | size_t context = 0; |
796 | ETW::GCLog::BeginMovedReferences(&context); |
797 | GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_gc); |
798 | ETW::GCLog::EndMovedReferences(context); |
799 | } |
800 | #endif //GC_PROFILING || FEATURE_EVENT_TRACE |
801 | } |
802 | |
803 | void GCToEEInterface::DiagWalkLOHSurvivors(void* gcContext) |
804 | { |
805 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
806 | if (ShouldTrackMovementForProfilerOrEtw()) |
807 | { |
808 | size_t context = 0; |
809 | ETW::GCLog::BeginMovedReferences(&context); |
810 | GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_loh); |
811 | ETW::GCLog::EndMovedReferences(context); |
812 | } |
813 | #endif //GC_PROFILING || FEATURE_EVENT_TRACE |
814 | } |
815 | |
816 | void GCToEEInterface::DiagWalkBGCSurvivors(void* gcContext) |
817 | { |
818 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
819 | if (ShouldTrackMovementForProfilerOrEtw()) |
820 | { |
821 | size_t context = 0; |
822 | ETW::GCLog::BeginMovedReferences(&context); |
823 | GCHeapUtilities::GetGCHeap()->DiagWalkSurvivorsWithType(gcContext, &WalkMovedReferences, (void*)context, walk_for_bgc); |
824 | ETW::GCLog::EndMovedReferences(context); |
825 | } |
826 | #endif //GC_PROFILING || FEATURE_EVENT_TRACE |
827 | } |
828 | |
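// Applies a write barrier update requested by the GC: a resize of the card table and
// heap bounds, a new ephemeral range, initial setup, or a switch to/from software write
// watch. Patches the barrier code and flushes instruction caches / restarts the EE as
// the patching routines require.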
829 | void GCToEEInterface::StompWriteBarrier(WriteBarrierParameters* args) |
830 | { |
831 | int stompWBCompleteActions = SWB_PASS; |
832 | bool is_runtime_suspended = false; |
833 | |
834 | assert(args != nullptr); |
835 | switch (args->operation) |
836 | { |
837 | case WriteBarrierOp::StompResize: |
838 | // StompResize requires a new card table, a new lowest address, and |
839 | // a new highest address |
840 | assert(args->card_table != nullptr); |
841 | assert(args->lowest_address != nullptr); |
842 | assert(args->highest_address != nullptr); |
843 | |
844 | g_card_table = args->card_table; |
845 | |
846 | #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES |
847 | assert(args->card_bundle_table != nullptr); |
848 | g_card_bundle_table = args->card_bundle_table; |
849 | #endif |
850 | |
851 | #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP |
852 | if (g_sw_ww_enabled_for_gc_heap && (args->write_watch_table != nullptr)) |
853 | { |
854 | assert(args->is_runtime_suspended); |
855 | g_sw_ww_table = args->write_watch_table; |
856 | } |
857 | #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP |
858 | |
859 | stompWBCompleteActions |= ::StompWriteBarrierResize(args->is_runtime_suspended, args->requires_upper_bounds_check); |
860 | |
861 | // We need to make sure that other threads executing checked write barriers |
862 | // will see the g_card_table update before g_lowest/highest_address updates. |
863 | // Otherwise, the checked write barrier may AV accessing the old card table |
864 | // with address that it does not cover. |
865 | // |
866 | // Even x86's total store ordering is insufficient here because threads reading |
867 | // g_card_table do so via the instruction cache, whereas g_lowest/highest_address |
868 | // are read via the data cache. |
869 | // |
870 | // The g_card_table update is covered by section 8.1.3 of the Intel Software |
871 | // Development Manual, Volume 3A (System Programming Guide, Part 1), about |
872 | // "cross-modifying code": We need all _executing_ threads to invalidate |
873 | // their instruction cache, which FlushProcessWriteBuffers achieves by sending |
// an IPI (inter-processor interrupt).
875 | |
876 | if (stompWBCompleteActions & SWB_ICACHE_FLUSH) |
877 | { |
878 | // flushing icache on current processor (thread) |
879 | ::FlushWriteBarrierInstructionCache(); |
880 | // asking other processors (threads) to invalidate their icache |
881 | FlushProcessWriteBuffers(); |
882 | } |
883 | |
884 | g_lowest_address = args->lowest_address; |
885 | VolatileStore(&g_highest_address, args->highest_address); |
886 | |
887 | #if defined(_ARM64_) || defined(_ARM_) |
// Need to update the barrier again for the changes to g_highest_address and g_lowest_address.
889 | is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended; |
890 | stompWBCompleteActions |= ::StompWriteBarrierResize(is_runtime_suspended, args->requires_upper_bounds_check); |
891 | |
892 | #ifdef _ARM_ |
893 | if (stompWBCompleteActions & SWB_ICACHE_FLUSH) |
894 | { |
895 | ::FlushWriteBarrierInstructionCache(); |
896 | } |
897 | #endif |
898 | |
899 | is_runtime_suspended = (stompWBCompleteActions & SWB_EE_RESTART) || args->is_runtime_suspended; |
900 | if(!is_runtime_suspended) |
901 | { |
902 | // If runtime is not suspended, force updated state to be visible to all threads |
903 | MemoryBarrier(); |
904 | } |
905 | #endif |
906 | if (stompWBCompleteActions & SWB_EE_RESTART) |
907 | { |
908 | assert(!args->is_runtime_suspended && |
        "if runtime was suspended in patching routines then it was in running state at beginning");
910 | ThreadSuspend::RestartEE(FALSE, TRUE); |
911 | } |
912 | return; // unlike other branches we have already done cleanup so bailing out here |
913 | case WriteBarrierOp::StompEphemeral: |
914 | // StompEphemeral requires a new ephemeral low and a new ephemeral high |
915 | assert(args->ephemeral_low != nullptr); |
916 | assert(args->ephemeral_high != nullptr); |
917 | g_ephemeral_low = args->ephemeral_low; |
918 | g_ephemeral_high = args->ephemeral_high; |
919 | stompWBCompleteActions |= ::StompWriteBarrierEphemeral(args->is_runtime_suspended); |
920 | break; |
921 | case WriteBarrierOp::Initialize: |
922 | // This operation should only be invoked once, upon initialization. |
923 | assert(g_card_table == nullptr); |
924 | assert(g_lowest_address == nullptr); |
925 | assert(g_highest_address == nullptr); |
926 | assert(args->card_table != nullptr); |
927 | assert(args->lowest_address != nullptr); |
928 | assert(args->highest_address != nullptr); |
929 | assert(args->ephemeral_low != nullptr); |
930 | assert(args->ephemeral_high != nullptr); |
931 | assert(args->is_runtime_suspended && "the runtime must be suspended here!" ); |
932 | assert(!args->requires_upper_bounds_check && "the ephemeral generation must be at the top of the heap!" ); |
933 | |
934 | g_card_table = args->card_table; |
935 | |
936 | #ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES |
937 | assert(g_card_bundle_table == nullptr); |
938 | g_card_bundle_table = args->card_bundle_table; |
939 | #endif |
940 | |
941 | g_lowest_address = args->lowest_address; |
942 | g_highest_address = args->highest_address; |
943 | stompWBCompleteActions |= ::StompWriteBarrierResize(true, false); |
944 | |
945 | // StompWriteBarrierResize does not necessarily bash g_ephemeral_low |
946 | // usages, so we must do so here. This is particularly true on x86, |
947 | // where StompWriteBarrierResize will not bash g_ephemeral_low when |
948 | // called with the parameters (true, false), as it is above. |
949 | g_ephemeral_low = args->ephemeral_low; |
950 | g_ephemeral_high = args->ephemeral_high; |
951 | stompWBCompleteActions |= ::StompWriteBarrierEphemeral(true); |
952 | break; |
953 | case WriteBarrierOp::SwitchToWriteWatch: |
954 | #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP |
955 | assert(args->write_watch_table != nullptr); |
956 | assert(args->is_runtime_suspended && "the runtime must be suspended here!" ); |
957 | g_sw_ww_table = args->write_watch_table; |
958 | g_sw_ww_enabled_for_gc_heap = true; |
959 | stompWBCompleteActions |= ::SwitchToWriteWatchBarrier(true); |
960 | #else |
961 | assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP" ); |
962 | #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP |
963 | break; |
964 | case WriteBarrierOp::SwitchToNonWriteWatch: |
965 | #ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP |
966 | assert(args->is_runtime_suspended && "the runtime must be suspended here!" ); |
967 | g_sw_ww_table = 0; |
968 | g_sw_ww_enabled_for_gc_heap = false; |
969 | stompWBCompleteActions |= ::SwitchToNonWriteWatchBarrier(true); |
970 | #else |
971 | assert(!"should never be called without FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP" ); |
972 | #endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP |
973 | break; |
974 | default: |
975 | assert(!"unknown WriteBarrierOp enum" ); |
976 | } |
977 | if (stompWBCompleteActions & SWB_ICACHE_FLUSH) |
978 | { |
979 | ::FlushWriteBarrierInstructionCache(); |
980 | } |
981 | if (stompWBCompleteActions & SWB_EE_RESTART) |
982 | { |
983 | assert(!args->is_runtime_suspended && |
        "if runtime was suspended in patching routines then it was in running state at beginning");
985 | ThreadSuspend::RestartEE(FALSE, TRUE); |
986 | } |
987 | } |
988 | |
989 | void GCToEEInterface::EnableFinalization(bool foundFinalizers) |
990 | { |
991 | if (foundFinalizers || FinalizerThread::HaveExtraWorkForFinalizer()) |
992 | { |
993 | FinalizerThread::EnableFinalization(); |
994 | } |
995 | } |
996 | |
997 | void GCToEEInterface::HandleFatalError(unsigned int exitCode) |
998 | { |
999 | EEPOLICY_HANDLE_FATAL_ERROR(exitCode); |
1000 | } |
1001 | |
1002 | bool GCToEEInterface::ShouldFinalizeObjectForUnload(void* pDomain, Object* obj) |
1003 | { |
1004 | // CoreCLR does not have appdomains, so this code path is dead. Other runtimes may |
1005 | // choose to inspect the object being finalized here. |
1006 | // [DESKTOP TODO] Desktop looks for "agile and finalizable" objects and may choose |
1007 | // to move them to a new app domain instead of finalizing them here. |
1008 | return true; |
1009 | } |
1010 | |
1011 | bool GCToEEInterface::EagerFinalized(Object* obj) |
1012 | { |
1013 | MethodTable* pMT = obj->GetGCSafeMethodTable(); |
1014 | if (pMT == pWeakReferenceMT || |
1015 | pMT->GetCanonicalMethodTable() == pWeakReferenceOfTCanonMT) |
1016 | { |
1017 | FinalizeWeakReference(obj); |
1018 | return true; |
1019 | } |
1020 | |
1021 | return false; |
1022 | } |
1023 | |
1024 | MethodTable* GCToEEInterface::GetFreeObjectMethodTable() |
1025 | { |
1026 | assert(g_pFreeObjectMethodTable != nullptr); |
1027 | return g_pFreeObjectMethodTable; |
1028 | } |
1029 | |
// These are arbitrary; we shouldn't ever have config keys or values
// longer than these lengths.
1032 | const size_t MaxConfigKeyLength = 255; |
1033 | const size_t MaxConfigValueLength = 255; |
1034 | |
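// Boolean GC configuration. A few well-known keys are answered directly from startup
// configuration; any other key is looked up through CLRConfig, which (for the default
// lookup used here) effectively reads the COMPlus_<key> setting.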
1035 | bool GCToEEInterface::GetBooleanConfigValue(const char* key, bool* value) |
1036 | { |
1037 | CONTRACTL { |
1038 | NOTHROW; |
1039 | GC_NOTRIGGER; |
1040 | } CONTRACTL_END; |
1041 | |
1042 | // these configuration values are given to us via startup flags. |
1043 | if (strcmp(key, "gcServer" ) == 0) |
1044 | { |
1045 | *value = g_heap_type == GC_HEAP_SVR; |
1046 | return true; |
1047 | } |
1048 | |
1049 | if (strcmp(key, "gcConcurrent" ) == 0) |
1050 | { |
1051 | *value = !!g_pConfig->GetGCconcurrent(); |
1052 | return true; |
1053 | } |
1054 | |
1055 | if (strcmp(key, "GCRetainVM" ) == 0) |
1056 | { |
1057 | *value = !!g_pConfig->GetGCRetainVM(); |
1058 | return true; |
1059 | } |
1060 | |
1061 | WCHAR configKey[MaxConfigKeyLength]; |
1062 | if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0) |
1063 | { |
1064 | // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.) |
1065 | return false; |
1066 | } |
1067 | |
1068 | // otherwise, ask the config subsystem. |
1069 | if (CLRConfig::IsConfigOptionSpecified(configKey)) |
1070 | { |
1071 | CLRConfig::ConfigDWORDInfo info { configKey , 0, CLRConfig::EEConfig_default }; |
1072 | *value = CLRConfig::GetConfigValue(info) != 0; |
1073 | return true; |
1074 | } |
1075 | |
1076 | return false; |
1077 | } |
1078 | |
1079 | bool GCToEEInterface::GetIntConfigValue(const char* key, int64_t* value) |
1080 | { |
1081 | CONTRACTL { |
1082 | NOTHROW; |
1083 | GC_NOTRIGGER; |
1084 | } CONTRACTL_END; |
1085 | |
1086 | if (strcmp(key, "GCSegmentSize" ) == 0) |
1087 | { |
1088 | *value = g_pConfig->GetSegmentSize(); |
1089 | return true; |
1090 | } |
1091 | |
1092 | if (strcmp(key, "GCgen0size" ) == 0) |
1093 | { |
1094 | *value = g_pConfig->GetGCgen0size(); |
1095 | return true; |
1096 | } |
1097 | |
1098 | if (strcmp(key, "GCLOHThreshold" ) == 0) |
1099 | { |
1100 | *value = g_pConfig->GetGCLOHThreshold(); |
1101 | return true; |
1102 | } |
1103 | |
1104 | WCHAR configKey[MaxConfigKeyLength]; |
1105 | if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0) |
1106 | { |
1107 | // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.) |
1108 | return false; |
1109 | } |
1110 | |
// There is no ConfigULONGLONGInfo, and the GC uses 64-bit values for things like GCHeapAffinitizeMask,
// so we have to fake it by fetching the value as a string and converting it to uint64_t ourselves.
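// For example (illustrative only; the conversion below parses hexadecimal), a value
// specified as "0xFF" or "FF" is returned from this function as 255.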
1113 | if (CLRConfig::IsConfigOptionSpecified(configKey)) |
1114 | { |
1115 | CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default }; |
1116 | LPWSTR out = CLRConfig::GetConfigValue(info); |
1117 | if (!out) |
1118 | { |
1119 | // config not found |
1120 | CLRConfig::FreeConfigString(out); |
1121 | return false; |
1122 | } |
1123 | |
1124 | wchar_t *end; |
1125 | uint64_t result; |
1126 | errno = 0; |
1127 | result = _wcstoui64(out, &end, 16); |
// errno is set to ERANGE if the number is out of range, and end is set to out if
// no valid conversion exists.
1130 | if (errno == ERANGE || end == out) |
1131 | { |
1132 | CLRConfig::FreeConfigString(out); |
1133 | return false; |
1134 | } |
1135 | |
1136 | *value = static_cast<int64_t>(result); |
1137 | CLRConfig::FreeConfigString(out); |
1138 | return true; |
1139 | } |
1140 | |
1141 | return false; |
1142 | } |
1143 | |
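// String GC configuration: looks the key up through CLRConfig and hands back a narrow
// (multi-byte) copy, at most MaxConfigValueLength characters, which the GC later releases
// through FreeStringConfigValue.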
1144 | bool GCToEEInterface::GetStringConfigValue(const char* key, const char** value) |
1145 | { |
1146 | CONTRACTL { |
1147 | NOTHROW; |
1148 | GC_NOTRIGGER; |
1149 | } CONTRACTL_END; |
1150 | |
1151 | WCHAR configKey[MaxConfigKeyLength]; |
1152 | if (MultiByteToWideChar(CP_ACP, 0, key, -1 /* key is null-terminated */, configKey, MaxConfigKeyLength) == 0) |
1153 | { |
1154 | // whatever this is... it's not something we care about. (It was too long, wasn't unicode, etc.) |
1155 | return false; |
1156 | } |
1157 | |
1158 | CLRConfig::ConfigStringInfo info { configKey, CLRConfig::EEConfig_default }; |
1159 | LPWSTR out = CLRConfig::GetConfigValue(info); |
1160 | if (!out) |
1161 | { |
1162 | // config not found |
1163 | return false; |
1164 | } |
1165 | |
1166 | // not allocated on the stack since it escapes this function |
1167 | AStringHolder configResult = new (nothrow) char[MaxConfigValueLength]; |
1168 | if (!configResult) |
1169 | { |
1170 | CLRConfig::FreeConfigString(out); |
1171 | return false; |
1172 | } |
1173 | |
1174 | if (WideCharToMultiByte(CP_ACP, 0, out, -1 /* out is null-terminated */, |
        configResult.GetValue(), MaxConfigValueLength, nullptr, nullptr) == 0)
1176 | { |
1177 | // this should only happen if the config subsystem gives us a string that's not valid |
1178 | // unicode. |
1179 | CLRConfig::FreeConfigString(out); |
1180 | return false; |
1181 | } |
1182 | |
1183 | *value = configResult.Extract(); |
1184 | CLRConfig::FreeConfigString(out); |
1185 | return true; |
1186 | } |
1187 | |
1188 | void GCToEEInterface::FreeStringConfigValue(const char* value) |
1189 | { |
1190 | delete [] value; |
1191 | } |
1192 | |
1193 | bool GCToEEInterface::IsGCThread() |
1194 | { |
1195 | return !!::IsGCThread(); |
1196 | } |
1197 | |
1198 | bool GCToEEInterface::WasCurrentThreadCreatedByGC() |
1199 | { |
1200 | return !!::IsGCSpecialThread(); |
1201 | } |
1202 | |
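// Argument blocks for the thread stubs below. As with BackgroundThreadStub, the creating
// thread waits on ThreadStartedEvent, so the stubs must not touch these blocks after
// setting the event.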
1203 | struct SuspendableThreadStubArguments |
1204 | { |
1205 | void* Argument; |
1206 | void (*ThreadStart)(void*); |
1207 | Thread* Thread; |
1208 | bool HasStarted; |
1209 | CLREvent ThreadStartedEvent; |
1210 | }; |
1211 | |
1212 | struct ThreadStubArguments |
1213 | { |
1214 | void* Argument; |
1215 | void (*ThreadStart)(void*); |
1216 | HANDLE Thread; |
1217 | bool HasStarted; |
1218 | CLREvent ThreadStartedEvent; |
1219 | }; |
1220 | |
1221 | namespace |
1222 | { |
1223 | const size_t MaxThreadNameSize = 255; |
1224 | |
1225 | bool CreateSuspendableThread( |
1226 | void (*threadStart)(void*), |
1227 | void* argument, |
1228 | const wchar_t* name) |
1229 | { |
1230 | LIMITED_METHOD_CONTRACT; |
1231 | |
1232 | SuspendableThreadStubArguments args; |
1233 | args.Argument = argument; |
1234 | args.ThreadStart = threadStart; |
1235 | args.Thread = nullptr; |
1236 | args.HasStarted = false; |
1237 | if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE)) |
1238 | { |
1239 | return false; |
1240 | } |
1241 | |
1242 | EX_TRY |
1243 | { |
1244 | args.Thread = SetupUnstartedThread(FALSE); |
1245 | } |
1246 | EX_CATCH |
1247 | { |
1248 | } |
1249 | EX_END_CATCH(SwallowAllExceptions) |
1250 | |
1251 | if (!args.Thread) |
1252 | { |
1253 | args.ThreadStartedEvent.CloseEvent(); |
1254 | return false; |
1255 | } |
1256 | |
1257 | auto threadStub = [](void* argument) -> DWORD |
1258 | { |
1259 | SuspendableThreadStubArguments* args = static_cast<SuspendableThreadStubArguments*>(argument); |
1260 | assert(args != nullptr); |
1261 | |
1262 | ClrFlsSetThreadType(ThreadType_GC); |
1263 | args->Thread->SetGCSpecial(true); |
1264 | STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY); |
1265 | args->HasStarted = !!args->Thread->HasStarted(false); |
1266 | |
1267 | Thread* thread = args->Thread; |
1268 | auto threadStart = args->ThreadStart; |
1269 | void* threadArgument = args->Argument; |
1270 | bool hasStarted = args->HasStarted; |
1271 | args->ThreadStartedEvent.Set(); |
1272 | |
1273 | // The stubArgs cannot be used once the event is set, since that releases wait on the |
1274 | // event in the function that created this thread and the stubArgs go out of scope. |
1275 | if (hasStarted) |
1276 | { |
1277 | threadStart(threadArgument); |
1278 | DestroyThread(thread); |
1279 | } |
1280 | |
1281 | return 0; |
1282 | }; |
1283 | if (!args.Thread->CreateNewThread(0, threadStub, &args, name)) |
1284 | { |
1285 | args.Thread->DecExternalCount(FALSE); |
1286 | args.ThreadStartedEvent.CloseEvent(); |
1287 | return false; |
1288 | } |
1289 | |
1290 | args.Thread->SetBackground(TRUE, FALSE); |
1291 | args.Thread->StartThread(); |
1292 | |
1293 | // Wait for the thread to be in its main loop |
1294 | uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE); |
1295 | args.ThreadStartedEvent.CloseEvent(); |
1296 | _ASSERTE(res == WAIT_OBJECT_0); |
1297 | |
1298 | if (!args.HasStarted) |
1299 | { |
1300 | // The thread has failed to start and the Thread object was destroyed in the Thread::HasStarted |
1301 | // failure code path. |
1302 | return false; |
1303 | } |
1304 | |
1305 | return true; |
1306 | } |
1307 | |
1308 | bool CreateNonSuspendableThread( |
1309 | void (*threadStart)(void*), |
1310 | void* argument, |
1311 | const wchar_t* name) |
1312 | { |
1313 | LIMITED_METHOD_CONTRACT; |
1314 | |
1315 | ThreadStubArguments args; |
1316 | args.Argument = argument; |
1317 | args.ThreadStart = threadStart; |
1318 | args.Thread = INVALID_HANDLE_VALUE; |
1319 | if (!args.ThreadStartedEvent.CreateAutoEventNoThrow(FALSE)) |
1320 | { |
1321 | return false; |
1322 | } |
1323 | |
1324 | auto threadStub = [](void* argument) -> DWORD |
1325 | { |
1326 | ThreadStubArguments* args = static_cast<ThreadStubArguments*>(argument); |
1327 | assert(args != nullptr); |
1328 | |
1329 | ClrFlsSetThreadType(ThreadType_GC); |
1330 | STRESS_LOG_RESERVE_MEM(GC_STRESSLOG_MULTIPLY); |
1331 | |
1332 | args->HasStarted = true; |
1333 | auto threadStart = args->ThreadStart; |
1334 | void* threadArgument = args->Argument; |
1335 | args->ThreadStartedEvent.Set(); |
1336 | |
1337 | // The stub args cannot be used once the event is set, since that releases wait on the |
1338 | // event in the function that created this thread and the stubArgs go out of scope. |
1339 | threadStart(threadArgument); |
1340 | return 0; |
1341 | }; |
1342 | |
1343 | args.Thread = Thread::CreateUtilityThread(Thread::StackSize_Medium, threadStub, &args, name); |
1344 | if (args.Thread == INVALID_HANDLE_VALUE) |
1345 | { |
1346 | args.ThreadStartedEvent.CloseEvent(); |
1347 | return false; |
1348 | } |
1349 | |
1350 | // Wait for the thread to be in its main loop |
1351 | uint32_t res = args.ThreadStartedEvent.Wait(INFINITE, FALSE); |
1352 | args.ThreadStartedEvent.CloseEvent(); |
1353 | _ASSERTE(res == WAIT_OBJECT_0); |
1354 | |
1355 | CloseHandle(args.Thread); |
1356 | return true; |
1357 | } |
1358 | } // anonymous namespace |
1359 | |
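// Creates a thread on behalf of the GC. Suspendable threads (e.g. background GC threads)
// get a full EE Thread object so they participate in EE suspension; non-suspendable
// threads are plain utility threads that the suspension machinery never sees.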
1360 | bool GCToEEInterface::CreateThread(void (*threadStart)(void*), void* arg, bool is_suspendable, const char* name) |
1361 | { |
1362 | InlineSString<MaxThreadNameSize> wideName; |
1363 | const WCHAR* namePtr = nullptr; |
1364 | EX_TRY |
1365 | { |
1366 | if (name != nullptr) |
1367 | { |
1368 | wideName.SetUTF8(name); |
1369 | namePtr = wideName.GetUnicode(); |
1370 | } |
1371 | } |
1372 | EX_CATCH |
1373 | { |
1374 | // we're not obligated to provide a name - if it's not valid, |
1375 | // just report nullptr as the name. |
1376 | } |
1377 | EX_END_CATCH(SwallowAllExceptions) |
1378 | |
1379 | LIMITED_METHOD_CONTRACT; |
1380 | if (is_suspendable) |
1381 | { |
1382 | return CreateSuspendableThread(threadStart, arg, namePtr); |
1383 | } |
1384 | else |
1385 | { |
1386 | return CreateNonSuspendableThread(threadStart, arg, namePtr); |
1387 | } |
1388 | } |
1389 | |
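// Reports the user object(s) kept alive by an async-pinned OverlappedDataObject. If the
// user object is an object[], its elements are reported as pinned instead of the array
// itself; otherwise the user object itself is reported as pinned.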
1390 | void GCToEEInterface::WalkAsyncPinnedForPromotion(Object* object, ScanContext* sc, promote_func* callback) |
1391 | { |
1392 | LIMITED_METHOD_CONTRACT; |
1393 | |
1394 | assert(object != nullptr); |
1395 | assert(sc != nullptr); |
1396 | assert(callback != nullptr); |
1397 | if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass) |
1398 | { |
1399 | // not an overlapped data object - nothing to do. |
1400 | return; |
1401 | } |
1402 | |
1403 | // reporting the pinned user objects |
1404 | OverlappedDataObject *pOverlapped = (OverlappedDataObject *)object; |
1405 | if (pOverlapped->m_userObject != NULL) |
1406 | { |
1407 | if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable()) |
1408 | { |
// OverlappedDataObject is very special. An async pin handle keeps it alive.
// During GC, we also make sure that
// 1. m_userObject itself does not move if m_userObject is not an array
// 2. Every object pointed to by m_userObject does not move if m_userObject is an array
// We do not want to pin m_userObject itself if it is an array.
1414 | ArrayBase* pUserObject = (ArrayBase*)OBJECTREFToObject(pOverlapped->m_userObject); |
1415 | Object **ppObj = (Object**)pUserObject->GetDataPtr(TRUE); |
1416 | size_t num = pUserObject->GetNumComponents(); |
1417 | for (size_t i = 0; i < num; i++) |
1418 | { |
1419 | callback(ppObj + i, sc, GC_CALL_PINNED); |
1420 | } |
1421 | } |
1422 | else |
1423 | { |
1424 | callback(&OBJECTREF_TO_UNCHECKED_OBJECTREF(pOverlapped->m_userObject), (ScanContext *)sc, GC_CALL_PINNED); |
1425 | } |
1426 | } |
1427 | } |
1428 | |
1429 | void GCToEEInterface::WalkAsyncPinned(Object* object, void* context, void (*callback)(Object*, Object*, void*)) |
1430 | { |
1431 | LIMITED_METHOD_CONTRACT; |
1432 | |
1433 | assert(object != nullptr); |
1434 | assert(callback != nullptr); |
1435 | |
1436 | if (object->GetGCSafeMethodTable() != g_pOverlappedDataClass) |
1437 | { |
1438 | return; |
1439 | } |
1440 | |
1441 | OverlappedDataObject *pOverlapped = (OverlappedDataObject *)(object); |
1442 | if (pOverlapped->m_userObject != NULL) |
1443 | { |
1444 | Object * pUserObject = OBJECTREFToObject(pOverlapped->m_userObject); |
1445 | callback(object, pUserObject, context); |
1446 | if (pOverlapped->m_userObject->GetGCSafeMethodTable() == g_pPredefinedArrayTypes[ELEMENT_TYPE_OBJECT]->GetMethodTable()) |
1447 | { |
1448 | ArrayBase* pUserArrayObject = (ArrayBase*)pUserObject; |
1449 | Object **pObj = (Object**)pUserArrayObject->GetDataPtr(TRUE); |
1450 | size_t num = pUserArrayObject->GetNumComponents(); |
1451 | for (size_t i = 0; i < num; i ++) |
1452 | { |
1453 | callback(pUserObject, pObj[i], context); |
1454 | } |
1455 | } |
1456 | } |
1457 | } |
1458 | |
1459 | IGCToCLREventSink* GCToEEInterface::EventSink() |
1460 | { |
1461 | LIMITED_METHOD_CONTRACT; |
1462 | |
1463 | return &g_gcToClrEventSink; |
1464 | } |
1465 | |
1466 | uint32_t GCToEEInterface::GetDefaultDomainIndex() |
1467 | { |
1468 | LIMITED_METHOD_CONTRACT; |
1469 | |
1470 | return SystemDomain::System()->DefaultDomain()->GetIndex().m_dwIndex; |
1471 | } |
1472 | |
1473 | void *GCToEEInterface::GetAppDomainAtIndex(uint32_t appDomainIndex) |
1474 | { |
1475 | LIMITED_METHOD_CONTRACT; |
1476 | |
1477 | ADIndex index(appDomainIndex); |
1478 | return static_cast<void *>(SystemDomain::GetAppDomainAtIndex(index)); |
1479 | } |
1480 | |
1481 | bool GCToEEInterface::AppDomainCanAccessHandleTable(uint32_t appDomainID) |
1482 | { |
1483 | LIMITED_METHOD_CONTRACT; |
1484 | |
1485 | ADIndex index(appDomainID); |
1486 | AppDomain *pDomain = SystemDomain::GetAppDomainAtIndex(index); |
1487 | return (pDomain != NULL); |
1488 | } |
1489 | |
1490 | uint32_t GCToEEInterface::GetIndexOfAppDomainBeingUnloaded() |
1491 | { |
1492 | LIMITED_METHOD_CONTRACT; |
1493 | |
1494 | return 0xFFFFFFFF; |
1495 | } |
1496 | |
1497 | uint32_t GCToEEInterface::GetTotalNumSizedRefHandles() |
1498 | { |
1499 | LIMITED_METHOD_CONTRACT; |
1500 | |
1501 | return SystemDomain::System()->GetTotalNumSizedRefHandles(); |
1502 | } |
1503 | |
1504 | |
1505 | bool GCToEEInterface::AppDomainIsRudeUnload(void *appDomain) |
1506 | { |
1507 | LIMITED_METHOD_CONTRACT; |
1508 | |
1509 | return false; |
1510 | } |
1511 | |
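// GC notification support for debugger/DAC consumers: AnalyzeSurvivorsRequested reports
// whether anyone has registered for a GC_MARK_END notification covering the condemned
// generation, and AnalyzeSurvivorsFinished raises that notification.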
1512 | bool GCToEEInterface::AnalyzeSurvivorsRequested(int condemnedGeneration) |
1513 | { |
1514 | LIMITED_METHOD_CONTRACT; |
1515 | |
1516 | // Is the list active? |
1517 | GcNotifications gn(g_pGcNotificationTable); |
1518 | if (gn.IsActive()) |
1519 | { |
1520 | GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } }; |
1521 | if (gn.GetNotification(gea) != 0) |
1522 | { |
1523 | return true; |
1524 | } |
1525 | } |
1526 | |
1527 | return false; |
1528 | } |
1529 | |
1530 | void GCToEEInterface::AnalyzeSurvivorsFinished(int condemnedGeneration) |
1531 | { |
1532 | LIMITED_METHOD_CONTRACT; |
1533 | |
1534 | // Is the list active? |
1535 | GcNotifications gn(g_pGcNotificationTable); |
1536 | if (gn.IsActive()) |
1537 | { |
1538 | GcEvtArgs gea = { GC_MARK_END, { (1<<condemnedGeneration) } }; |
1539 | if (gn.GetNotification(gea) != 0) |
1540 | { |
1541 | DACNotify::DoGCNotification(gea); |
1542 | } |
1543 | } |
1544 | } |
1545 | |
1546 | void GCToEEInterface::VerifySyncTableEntry() |
1547 | { |
1548 | LIMITED_METHOD_CONTRACT; |
1549 | |
1550 | #ifdef VERIFY_HEAP |
1551 | SyncBlockCache::GetSyncBlockCache()->VerifySyncTableEntry(); |
1552 | #endif // VERIFY_HEAP |
1553 | } |
1554 | |