// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// ===========================================================================
// File: TieredCompilation.CPP
//
// ===========================================================================



#include "common.h"
#include "excep.h"
#include "log.h"
#include "win32threadpool.h"
#include "threadsuspend.h"
#include "tieredcompilation.h"

// TieredCompilationManager determines which methods should be recompiled and
// how they should be recompiled to best optimize the running code. It then
// handles the logistics of getting new code created and installed.
//
//
// # Important entrypoints in this code:
//
//
// a) .ctor and Init(...) - called once during AppDomain initialization
// b) OnMethodCalled(...) - called when a method is being invoked. When a method
//                     has been called enough times this is currently the only
//                     trigger that initiates re-compilation.
// c) Shutdown() - called during AppDomain::Exit() to begin the process
//                     of stopping tiered compilation. After this point no more
//                     background optimization work will be initiated but in-progress
//                     work still needs to complete.
// d) ShutdownAllDomains() - called from EEShutdownHelper to block until all async work is
//                     complete. We must do this before we shut down the JIT.
//
// (A rough lifecycle sketch of these entrypoints follows below.)
//
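// Illustrative, non-normative lifecycle sketch; the host routine names below are
// placeholders for where the runtime actually makes these calls:
//
//     TieredCompilationManager mgr;            // (a) at AppDomain construction
//     mgr.Init(appDomainId);                   // (a) at AppDomain initialization
//
//     // (b) from the call counting logic, on each invocation of an eligible method
//     mgr.OnMethodCalled(pMD, callCount, &stopCounting, &promoted);
//
//     mgr.Shutdown();                          // (c) during AppDomain::Exit()
//     TieredCompilationManager::ShutdownAllDomains(); // (d) from EEShutdownHelper
//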
// # Overall workflow
//
// Methods initially call into OnMethodCalled() and once the call count exceeds
// a fixed limit we queue work onto our internal list of methods needing to
// be recompiled (m_methodsToOptimize). If there is currently no thread
// servicing our queue asynchronously then we use the runtime threadpool
// QueueUserWorkItem to recruit one. During the callback for each threadpool work
// item we handle as many methods as possible in a fixed period of time, then
// queue another threadpool work item if m_methodsToOptimize hasn't been drained.
//
// The background thread enters at StaticOptimizeMethodsCallback(), enters the
// appdomain, and then begins calling OptimizeMethod on each method in the
// queue. For each method we jit it, then update the precode so that future
// entrypoint callers will run the new code.
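//
// A minimal sketch of that background loop, using only members visible in this
// file (the real OptimizeMethods() below additionally handles shutdown, tiering
// delays, exceptions, and re-queuing itself when the time quantum expires):
//
//     while (true)
//     {
//         NativeCodeVersion ncv;
//         {
//             CrstHolder holder(&m_lock);
//             ncv = GetNextMethodToOptimize();
//             if (ncv.IsNull())
//                 break;               // queue drained
//         }
//         OptimizeMethod(ncv);         // jit tier 1 code, then publish via the precode
//     }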
//
// # Error handling
//
// The overall principle is: don't swallow terminal failures that may have corrupted the
// process (an AV, for example), but otherwise, for any transient issue or functional limitation
// that prevents us from optimizing, log it for diagnostics and then back out gracefully,
// continuing to run the less optimal code. The feature should be constructed so that
// errors are limited to OS resource exhaustion or poorly behaved managed code
// (for example, within an AssemblyResolve event or static constructor triggered by the JIT).
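//
// Concretely, that principle appears throughout this file as the following shape
// (a sketch of the existing pattern, not additional behavior):
//
//     bool success = false;
//     EX_TRY
//     {
//         // ... fallible work: allocation, timer/thread creation, jitting ...
//         success = true;
//     }
//     EX_CATCH
//     {
//         // non-terminal failures are logged and abandoned
//     }
//     EX_END_CATCH(RethrowTerminalExceptions); // terminal failures still escape
//     if (!success)
//     {
//         // back out gracefully; the less optimal code keeps running
//     }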

#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE)

// Called at AppDomain construction
TieredCompilationManager::TieredCompilationManager() :
    m_lock(CrstTieredCompilation),
    m_isAppDomainShuttingDown(FALSE),
    m_countOptimizationThreadsRunning(0),
    m_callCountOptimizationThreshhold(1),
    m_optimizationQuantumMs(50),
    m_methodsPendingCountingForTier1(nullptr),
    m_tieringDelayTimerHandle(nullptr),
    m_tier1CallCountingCandidateMethodRecentlyRecorded(false)
{
    WRAPPER_NO_CONTRACT;
    // On Unix, we can reach here before EEConfig is initialized, so defer config-based initialization to Init()
}

// Called at AppDomain Init
void TieredCompilationManager::Init(ADID appDomainId)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        CAN_TAKE_LOCK;
        MODE_PREEMPTIVE;
    }
    CONTRACTL_END;

    CrstHolder holder(&m_lock);
    m_domainId = appDomainId;
    m_callCountOptimizationThreshhold = g_pConfig->TieredCompilation_Tier1CallCountThreshold();
}

#endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE

NativeCodeVersion::OptimizationTier TieredCompilationManager::GetInitialOptimizationTier(PTR_MethodDesc pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != NULL);

#ifdef FEATURE_TIERED_COMPILATION
    if (pMethodDesc->RequestedAggressiveOptimization())
    {
        // Methods flagged with MethodImplOptions.AggressiveOptimization begin at tier 1, as a workaround for cold methods with
        // hot loops performing poorly (https://github.com/dotnet/coreclr/issues/19751)
        return NativeCodeVersion::OptimizationTier1;
    }
#endif // FEATURE_TIERED_COMPILATION

    return NativeCodeVersion::OptimizationTier0;
}

#if defined(FEATURE_TIERED_COMPILATION) && !defined(DACCESS_COMPILE)

bool TieredCompilationManager::RequiresCallCounting(MethodDesc* pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != NULL);
    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());

    return
        g_pConfig->TieredCompilation_CallCounting() &&
        GetInitialOptimizationTier(pMethodDesc) == NativeCodeVersion::OptimizationTier0;
}

// Called each time an eligible method in this AppDomain is invoked. This is our sole
// entrypoint to begin tiered compilation for now. Sets *shouldStopCountingCallsRef to
// TRUE when no more notifications are necessary, though more notifications may still
// arrive anyway.
//
// currentCallCount is pre-incremented, that is to say the value is 1 on the first call
// for a given method.
void TieredCompilationManager::OnMethodCalled(
    MethodDesc* pMethodDesc,
    DWORD currentCallCount,
    BOOL* shouldStopCountingCallsRef,
    BOOL* wasPromotedToTier1Ref)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());
    _ASSERTE(shouldStopCountingCallsRef != nullptr);
    _ASSERTE(wasPromotedToTier1Ref != nullptr);

    *shouldStopCountingCallsRef =
        // Stop call counting when the delay is in effect
        IsTieringDelayActive() ||
        // Initiate the delay on tier 0 activity (when a new eligible method is called the first time)
        (currentCallCount == 1 && g_pConfig->TieredCompilation_Tier1CallCountingDelayMs() != 0) ||
        // Stop call counting when ready for tier 1 promotion
        currentCallCount >= m_callCountOptimizationThreshhold;

    *wasPromotedToTier1Ref = currentCallCount >= m_callCountOptimizationThreshhold;

    if (currentCallCount == m_callCountOptimizationThreshhold)
    {
        AsyncPromoteMethodToTier1(pMethodDesc);
    }
}
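
// Worked example of the logic above, with illustrative values only (the real
// threshold and delay come from EEConfig): suppose the call counting delay is
// enabled and m_callCountOptimizationThreshhold is 30. The first call to a new
// eligible method reports stop-counting (and the caller is expected to initiate
// the delay via OnMethodCallCountingStoppedWithoutTier1Promotion); once counting
// resumes (see ResumeCountingCalls), calls up to 29 continue counting, and call
// 30 stops counting, reports the tier 1 promotion, and queues the background jit
// through AsyncPromoteMethodToTier1.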

void TieredCompilationManager::OnMethodCallCountingStoppedWithoutTier1Promotion(MethodDesc* pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != nullptr);
    _ASSERTE(pMethodDesc->IsEligibleForTieredCompilation());

    if (g_pConfig->TieredCompilation_Tier1CallCountingDelayMs() == 0)
    {
        return;
    }

    while (true)
    {
        bool attemptedToInitiateDelay = false;
        if (!IsTieringDelayActive())
        {
            if (!TryInitiateTieringDelay())
            {
                break;
            }
            attemptedToInitiateDelay = true;
        }

        {
            CrstHolder holder(&m_lock);

            SArray<MethodDesc*>* methodsPendingCountingForTier1 = m_methodsPendingCountingForTier1;
            if (methodsPendingCountingForTier1 == nullptr)
            {
                // Timer tick callback race, try again
                continue;
            }

            // Record the method to resume counting later (see TieringDelayTimerCallback)
            bool success = false;
            EX_TRY
            {
                methodsPendingCountingForTier1->Append(pMethodDesc);
                success = true;
            }
            EX_CATCH
            {
            }
            EX_END_CATCH(RethrowTerminalExceptions);
            if (!success)
            {
                break;
            }

            if (!attemptedToInitiateDelay)
            {
                // Further delay call counting for the currently recorded methods
                m_tier1CallCountingCandidateMethodRecentlyRecorded = true;
            }
        }
        return;
    }

    ResumeCountingCalls(pMethodDesc);
}

void TieredCompilationManager::AsyncPromoteMethodToTier1(MethodDesc* pMethodDesc)
{
    STANDARD_VM_CONTRACT;

    NativeCodeVersion t1NativeCodeVersion;

    // Add an inactive native code entry in the versioning table to track the tier 1
    // compilation we are going to create. This entry binds the compilation to a
    // particular version of the IL code regardless of any changes that may
    // occur between now and when jitting completes. If the IL does change in that
    // interval the new code entry won't be activated.
    {
        CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
        CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
        ILCodeVersion ilVersion = pCodeVersionManager->GetActiveILCodeVersion(pMethodDesc);
        NativeCodeVersionCollection nativeVersions = ilVersion.GetNativeCodeVersions(pMethodDesc);
        for (NativeCodeVersionIterator cur = nativeVersions.Begin(), end = nativeVersions.End(); cur != end; cur++)
        {
            if (cur->GetOptimizationTier() == NativeCodeVersion::OptimizationTier1)
            {
                // we've already promoted
                LOG((LF_TIEREDCOMPILATION, LL_INFO100000, "TieredCompilationManager::AsyncPromoteMethodToTier1 Method=0x%pM (%s::%s) ignoring already promoted method\n",
                    pMethodDesc, pMethodDesc->m_pszDebugClassName, pMethodDesc->m_pszDebugMethodName));
                return;
            }
        }

        HRESULT hr = S_OK;
        if (FAILED(hr = ilVersion.AddNativeCodeVersion(pMethodDesc, NativeCodeVersion::OptimizationTier1, &t1NativeCodeVersion)))
        {
            // Adding the native code version didn't work for some reason (presumably OOM);
            // just give up and continue running the existing code
            STRESS_LOG2(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::AsyncPromoteMethodToTier1: "
                "AddNativeCodeVersion failed hr=0x%x, method=%pM\n",
                hr, pMethodDesc);
            return;
        }
    }

    // Insert the method into the optimization queue and trigger a thread to service
    // the queue if needed.
    //
    // Note that an error here could affect concurrent threads running this code.
    // Those threads will observe m_countOptimizationThreadsRunning > 0 and return;
    // if QueueUserWorkItem then fails on this thread, lowering the count, their work
    // is left unserviced. Synchronous retries appear unlikely to offer any material
    // improvement, and complicating the code to narrow an already rare error case
    // isn't desirable.
    {
        SListElem<NativeCodeVersion>* pMethodListItem = new (nothrow) SListElem<NativeCodeVersion>(t1NativeCodeVersion);
        CrstHolder holder(&m_lock);
        if (pMethodListItem != NULL)
        {
            m_methodsToOptimize.InsertTail(pMethodListItem);
        }

        LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::AsyncPromoteMethodToTier1 Method=0x%pM (%s::%s), code version id=0x%x queued\n",
            pMethodDesc, pMethodDesc->m_pszDebugClassName, pMethodDesc->m_pszDebugMethodName,
            t1NativeCodeVersion.GetVersionId()));

        if (!IncrementWorkerThreadCountIfNeeded())
        {
            return;
        }
    }

    if (!TryAsyncOptimizeMethods())
    {
        CrstHolder holder(&m_lock);
        DecrementWorkerThreadCount();
    }
}

void TieredCompilationManager::Shutdown()
{
    STANDARD_VM_CONTRACT;

    CrstHolder holder(&m_lock);
    m_isAppDomainShuttingDown = TRUE;
}

bool TieredCompilationManager::IsTieringDelayActive()
{
    LIMITED_METHOD_CONTRACT;
    return m_methodsPendingCountingForTier1 != nullptr;
}

bool TieredCompilationManager::TryInitiateTieringDelay()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(g_pConfig->TieredCompilation());
    _ASSERTE(g_pConfig->TieredCompilation_Tier1CallCountingDelayMs() != 0);

    NewHolder<SArray<MethodDesc*>> methodsPendingCountingHolder = new(nothrow) SArray<MethodDesc*>();
    if (methodsPendingCountingHolder == nullptr)
    {
        return false;
    }

    bool success = false;
    EX_TRY
    {
        methodsPendingCountingHolder->Preallocate(64);
        success = true;
    }
    EX_CATCH
    {
    }
    EX_END_CATCH(RethrowTerminalExceptions);
    if (!success)
    {
        return false;
    }

    NewHolder<ThreadpoolMgr::TimerInfoContext> timerContextHolder = new(nothrow) ThreadpoolMgr::TimerInfoContext();
    if (timerContextHolder == nullptr)
    {
        return false;
    }
    timerContextHolder->AppDomainId = m_domainId;
    timerContextHolder->TimerId = 0;

    {
        CrstHolder holder(&m_lock);

        if (IsTieringDelayActive())
        {
            return true;
        }

        // The timer is created inside the lock to avoid some unnecessary additional complexity that would otherwise arise from
        // there being a failure point after the timer is successfully created. For instance, if the timer is created outside
        // the lock and then inside the lock it is found that another thread beat us to it, there would be two active timers
        // that may tick before the extra timer is deleted, along with additional concurrency issues.
        _ASSERTE(m_tieringDelayTimerHandle == nullptr);
        success = false;
        EX_TRY
        {
            if (ThreadpoolMgr::CreateTimerQueueTimer(
                    &m_tieringDelayTimerHandle,
                    TieringDelayTimerCallback,
                    timerContextHolder,
                    g_pConfig->TieredCompilation_Tier1CallCountingDelayMs(),
                    (DWORD)-1 /* Period, non-repeating */,
                    0 /* flags */))
            {
                success = true;
            }
        }
        EX_CATCH
        {
        }
        EX_END_CATCH(RethrowTerminalExceptions);
        if (!success)
        {
            _ASSERTE(m_tieringDelayTimerHandle == nullptr);
            return false;
        }

        m_methodsPendingCountingForTier1 = methodsPendingCountingHolder.Extract();
        _ASSERTE(IsTieringDelayActive());
    }

    timerContextHolder.SuppressRelease(); // the timer context is automatically deleted by the timer infrastructure
    return true;
}

void WINAPI TieredCompilationManager::TieringDelayTimerCallback(PVOID parameter, BOOLEAN timerFired)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(timerFired);

    ThreadpoolMgr::TimerInfoContext* timerContext = (ThreadpoolMgr::TimerInfoContext*)parameter;
    EX_TRY
    {
        GCX_COOP();
        ManagedThreadBase::ThreadPool(timerContext->AppDomainId, TieringDelayTimerCallbackInAppDomain, nullptr);
    }
    EX_CATCH
    {
        STRESS_LOG1(LF_TIEREDCOMPILATION, LL_ERROR, "TieredCompilationManager::TieringDelayTimerCallback: "
            "Unhandled exception, hr=0x%x\n",
            GET_EXCEPTION()->GetHR());
    }
    EX_END_CATCH(RethrowTerminalExceptions);
}

void TieredCompilationManager::TieringDelayTimerCallbackInAppDomain(LPVOID parameter)
{
    WRAPPER_NO_CONTRACT;
    GetAppDomain()->GetTieredCompilationManager()->TieringDelayTimerCallbackWorker();
}

void TieredCompilationManager::TieringDelayTimerCallbackWorker()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(GetAppDomain()->GetId() == m_domainId);

    HANDLE tieringDelayTimerHandle;
    bool tier1CallCountingCandidateMethodRecentlyRecorded;
    {
        // It's possible for the timer to tick before it is recorded that the delay is in effect. This lock guarantees that the
        // delay is in effect.
        CrstHolder holder(&m_lock);
        _ASSERTE(IsTieringDelayActive());

        tieringDelayTimerHandle = m_tieringDelayTimerHandle;
        _ASSERTE(tieringDelayTimerHandle != nullptr);

        tier1CallCountingCandidateMethodRecentlyRecorded = m_tier1CallCountingCandidateMethodRecentlyRecorded;
        if (tier1CallCountingCandidateMethodRecentlyRecorded)
        {
            m_tier1CallCountingCandidateMethodRecentlyRecorded = false;
        }
    }

    // Reschedule the timer if there has been recent tier 0 activity (when a new eligible method is called the first time) to
    // further delay call counting
    if (tier1CallCountingCandidateMethodRecentlyRecorded)
    {
        bool success = false;
        EX_TRY
        {
            if (ThreadpoolMgr::ChangeTimerQueueTimer(
                    tieringDelayTimerHandle,
                    g_pConfig->TieredCompilation_Tier1CallCountingDelayMs(),
                    (DWORD)-1 /* Period, non-repeating */))
            {
                success = true;
            }
        }
        EX_CATCH
        {
        }
        EX_END_CATCH(RethrowTerminalExceptions);
        if (success)
        {
            return;
        }
    }

    // Exchange information into locals inside the lock
    SArray<MethodDesc*>* methodsPendingCountingForTier1;
    bool optimizeMethods;
    {
        CrstHolder holder(&m_lock);

        methodsPendingCountingForTier1 = m_methodsPendingCountingForTier1;
        _ASSERTE(methodsPendingCountingForTier1 != nullptr);
        m_methodsPendingCountingForTier1 = nullptr;

        _ASSERTE(tieringDelayTimerHandle == m_tieringDelayTimerHandle);
        m_tieringDelayTimerHandle = nullptr;

        _ASSERTE(!IsTieringDelayActive());
        optimizeMethods = IncrementWorkerThreadCountIfNeeded();
    }

    // Install call counters
    MethodDesc** methods = methodsPendingCountingForTier1->GetElements();
    COUNT_T methodCount = methodsPendingCountingForTier1->GetCount();
    for (COUNT_T i = 0; i < methodCount; ++i)
    {
        ResumeCountingCalls(methods[i]);
    }
    delete methodsPendingCountingForTier1;

    ThreadpoolMgr::DeleteTimerQueueTimer(tieringDelayTimerHandle, nullptr);

    if (optimizeMethods)
    {
        OptimizeMethods();
    }
}
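
// Putting the delay pieces together, an illustrative timeline (the 100ms figure
// is hypothetical; the actual delay comes from TieredCompilation_Tier1CallCountingDelayMs()):
//
//     t=0ms    a first eligible method stops counting; TryInitiateTieringDelay()
//              creates the timer with a 100ms due time
//     t=40ms   more new methods are recorded while the delay is active, setting
//              m_tier1CallCountingCandidateMethodRecentlyRecorded
//     t=100ms  the timer ticks; the flag is set, so the tick reschedules the
//              timer for another 100ms and clears the flag
//     t=200ms  the timer ticks; no recent activity, so call counting resumes for
//              all recorded methods and queued optimization work may begin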

void TieredCompilationManager::ResumeCountingCalls(MethodDesc* pMethodDesc)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pMethodDesc != nullptr);
    _ASSERTE(pMethodDesc->IsVersionableWithPrecode());

    pMethodDesc->GetPrecode()->ResetTargetInterlocked();
}

bool TieredCompilationManager::TryAsyncOptimizeMethods()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(DebugGetWorkerThreadCount() != 0);

    // Terminal exceptions escape as exceptions, but all other errors should gracefully
    // return to the caller. Non-terminal error conditions should be rare (e.g. OOM,
    // OS failure to create a thread) and we consider it reasonable for some methods
    // to go unoptimized or have their optimization arbitrarily delayed under these
    // circumstances.
    bool success = false;
    EX_TRY
    {
        if (ThreadpoolMgr::QueueUserWorkItem(StaticOptimizeMethodsCallback, this, QUEUE_ONLY, TRUE))
        {
            success = true;
        }
        else
        {
            STRESS_LOG0(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::TryAsyncOptimizeMethods: "
                "ThreadpoolMgr::QueueUserWorkItem returned FALSE (no thread will run)\n");
        }
    }
    EX_CATCH
    {
        STRESS_LOG1(LF_TIEREDCOMPILATION, LL_WARNING, "TieredCompilationManager::TryAsyncOptimizeMethods: "
            "Exception queuing work item to threadpool, hr=0x%x\n",
            GET_EXCEPTION()->GetHR());
    }
    EX_END_CATCH(RethrowTerminalExceptions);
    return success;
}

// This is the initial entrypoint for the background thread, called by
// the threadpool.
DWORD WINAPI TieredCompilationManager::StaticOptimizeMethodsCallback(void *args)
{
    STANDARD_VM_CONTRACT;

    TieredCompilationManager * pTieredCompilationManager = (TieredCompilationManager *)args;
    pTieredCompilationManager->OptimizeMethodsCallback();

    return 0;
}

void TieredCompilationManager::OptimizeMethodsCallback()
{
    STANDARD_VM_CONTRACT;
    _ASSERTE(DebugGetWorkerThreadCount() != 0);

    // This app domain shutdown check isn't required for correctness, but it should
    // reduce unneeded exceptions from trying to enter a closed AppDomain
    {
        CrstHolder holder(&m_lock);
        if (m_isAppDomainShuttingDown)
        {
            DecrementWorkerThreadCount();
            return;
        }
    }

    EX_TRY
    {
        GCX_COOP();
        ENTER_DOMAIN_ID(m_domainId);
        {
            OptimizeMethods();
        }
        END_DOMAIN_TRANSITION;
    }
    EX_CATCH
    {
        STRESS_LOG1(LF_TIEREDCOMPILATION, LL_ERROR, "TieredCompilationManager::OptimizeMethodsCallback: "
            "Unhandled exception on domain transition, hr=0x%x\n",
            GET_EXCEPTION()->GetHR());
    }
    EX_END_CATCH(RethrowTerminalExceptions);
}

// This method processes one or more methods from the optimization queue
// on a background thread. Each such method will be jitted with code
// optimizations enabled and then installed as the active implementation
// of the method entrypoint.
//
// We need to be careful not to work for too long in a single invocation
// of this method or we could starve the threadpool and force
// it to create unnecessary additional threads.
void TieredCompilationManager::OptimizeMethods()
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(DebugGetWorkerThreadCount() != 0);
    _ASSERTE(GetAppDomain()->GetId() == m_domainId);

    ULONGLONG startTickCount = CLRGetTickCount64();
    NativeCodeVersion nativeCodeVersion;
    EX_TRY
    {
        GCX_PREEMP();
        while (true)
        {
            {
                CrstHolder holder(&m_lock);

                if (IsTieringDelayActive() || m_isAppDomainShuttingDown)
                {
                    DecrementWorkerThreadCount();
                    break;
                }

                nativeCodeVersion = GetNextMethodToOptimize();
                if (nativeCodeVersion.IsNull())
                {
                    DecrementWorkerThreadCount();
                    break;
                }
            }
            OptimizeMethod(nativeCodeVersion);

            // If we have been running for too long, return the thread to the threadpool and queue another event.
            // This gives the threadpool a chance to service other requests on this thread before returning to
            // this work.
            ULONGLONG currentTickCount = CLRGetTickCount64();
            if (currentTickCount >= startTickCount + m_optimizationQuantumMs)
            {
                if (!TryAsyncOptimizeMethods())
                {
                    CrstHolder holder(&m_lock);
                    DecrementWorkerThreadCount();
                }
                break;
            }
        }
    }
    EX_CATCH
    {
        {
            CrstHolder holder(&m_lock);
            DecrementWorkerThreadCount();
        }
        STRESS_LOG2(LF_TIEREDCOMPILATION, LL_ERROR, "TieredCompilationManager::OptimizeMethods: "
            "Unhandled exception during method optimization, hr=0x%x, last method=%p\n",
            GET_EXCEPTION()->GetHR(), nativeCodeVersion.GetMethodDesc());
    }
    EX_END_CATCH(RethrowTerminalExceptions);
}

// Jit compiles and installs new optimized code for a method.
// Called on a background thread.
void TieredCompilationManager::OptimizeMethod(NativeCodeVersion nativeCodeVersion)
{
    STANDARD_VM_CONTRACT;

    _ASSERTE(nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation());
    if (CompileCodeVersion(nativeCodeVersion))
    {
        ActivateCodeVersion(nativeCodeVersion);
    }
}

// Compiles new optimized code for a method.
// Called on a background thread.
BOOL TieredCompilationManager::CompileCodeVersion(NativeCodeVersion nativeCodeVersion)
{
    STANDARD_VM_CONTRACT;

    PCODE pCode = NULL;
    MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc();
    EX_TRY
    {
        pCode = pMethod->PrepareCode(nativeCodeVersion);
        LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::CompileCodeVersion Method=0x%pM (%s::%s), code version id=0x%x, code ptr=0x%p\n",
            pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName,
            nativeCodeVersion.GetVersionId(),
            pCode));
    }
    EX_CATCH
    {
        // Failing to jit should be rare but acceptable. We will leave whatever code already exists in place.
        STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::CompileCodeVersion: Method %pM failed to jit, hr=0x%x\n",
            pMethod, GET_EXCEPTION()->GetHR());
    }
    EX_END_CATCH(RethrowTerminalExceptions);

    return pCode != NULL;
}

// Updates the MethodDesc and precode so that future invocations of a method will
// execute the native code pointed to by pCode.
// Called on a background thread.
void TieredCompilationManager::ActivateCodeVersion(NativeCodeVersion nativeCodeVersion)
{
    STANDARD_VM_CONTRACT;

    MethodDesc* pMethod = nativeCodeVersion.GetMethodDesc();
    CodeVersionManager* pCodeVersionManager = pMethod->GetCodeVersionManager();

    // If the ilParent version is active, this will activate the native code version now.
    // Otherwise, if the ilParent version becomes active again in the future the native
    // code version will activate then.
    ILCodeVersion ilParent;
    HRESULT hr = S_OK;
    {
        // As long as we are exclusively using precode publishing for tiered compilation
        // methods this first attempt should succeed
        CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
        ilParent = nativeCodeVersion.GetILCodeVersion();
        hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion, FALSE);
        LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::ActivateCodeVersion Method=0x%pM (%s::%s), code version id=0x%x. SetActiveNativeCodeVersion ret=0x%x\n",
            pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName,
            nativeCodeVersion.GetVersionId(),
            hr));
    }
    if (hr == CORPROF_E_RUNTIME_SUSPEND_REQUIRED)
    {
        // If we start using jump-stamp publishing for tiered compilation, the first attempt
        // without the runtime suspended will fail and then this second attempt will
        // succeed.
        // Even though this works, performance is likely to be quite bad. Realistically
        // we are going to need batched updates to make tiered-compilation + jump-stamp
        // viable. This fallback path is just here as a proof-of-concept.
        ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_REJIT);
        {
            CodeVersionManager::TableLockHolder lock(pCodeVersionManager);
            hr = ilParent.SetActiveNativeCodeVersion(nativeCodeVersion, TRUE);
            LOG((LF_TIEREDCOMPILATION, LL_INFO10000, "TieredCompilationManager::ActivateCodeVersion Method=0x%pM (%s::%s), code version id=0x%x. [Suspended] SetActiveNativeCodeVersion ret=0x%x\n",
                pMethod, pMethod->m_pszDebugClassName, pMethod->m_pszDebugMethodName,
                nativeCodeVersion.GetVersionId(),
                hr));
        }
        ThreadSuspend::RestartEE(FALSE, TRUE);
    }
    if (FAILED(hr))
    {
        STRESS_LOG2(LF_TIEREDCOMPILATION, LL_INFO10, "TieredCompilationManager::ActivateCodeVersion: Method %pM failed to publish native code for native code version %d\n",
            pMethod, nativeCodeVersion.GetVersionId());
    }
}

// Dequeues the next method in the optimization queue.
// This should be called with m_lock already held and runs
// on the background thread.
NativeCodeVersion TieredCompilationManager::GetNextMethodToOptimize()
{
    STANDARD_VM_CONTRACT;

    SListElem<NativeCodeVersion>* pElem = m_methodsToOptimize.RemoveHead();
    if (pElem != NULL)
    {
        NativeCodeVersion nativeCodeVersion = pElem->GetValue();
        delete pElem;
        return nativeCodeVersion;
    }
    return NativeCodeVersion();
}

bool TieredCompilationManager::IncrementWorkerThreadCountIfNeeded()
{
    WRAPPER_NO_CONTRACT;
    // m_lock should be held

    if (0 == m_countOptimizationThreadsRunning &&
        !m_isAppDomainShuttingDown &&
        !m_methodsToOptimize.IsEmpty() &&
        !IsTieringDelayActive())
    {
        // Our current policy throttles at 1 thread, but in the future we
        // could experiment with more parallelism.
        m_countOptimizationThreadsRunning++;
        return true;
    }
    return false;
}

void TieredCompilationManager::DecrementWorkerThreadCount()
{
    STANDARD_VM_CONTRACT;
    // m_lock should be held
    _ASSERTE(m_countOptimizationThreadsRunning != 0);

    m_countOptimizationThreadsRunning--;
}

#ifdef _DEBUG
DWORD TieredCompilationManager::DebugGetWorkerThreadCount()
{
    WRAPPER_NO_CONTRACT;

    CrstHolder holder(&m_lock);
    return m_countOptimizationThreadsRunning;
}
#endif

// static
CORJIT_FLAGS TieredCompilationManager::GetJitFlags(NativeCodeVersion nativeCodeVersion)
{
    LIMITED_METHOD_CONTRACT;

    CORJIT_FLAGS flags;
    if (!nativeCodeVersion.GetMethodDesc()->IsEligibleForTieredCompilation())
    {
#ifdef FEATURE_INTERPRETER
        flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
#endif
        return flags;
    }

    if (nativeCodeVersion.GetOptimizationTier() == NativeCodeVersion::OptimizationTier0 &&
        !g_pConfig->TieredCompilation_OptimizeTier0())
    {
        flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER0);
    }
    else
    {
        flags.Set(CORJIT_FLAGS::CORJIT_FLAG_TIER1);
#ifdef FEATURE_INTERPRETER
        flags.Set(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE);
#endif
    }
    return flags;
}

#endif // FEATURE_TIERED_COMPILATION && !DACCESS_COMPILE
