1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | //========================================================================= |
6 | |
7 | // |
8 | // ThreadPoolRequest.cpp |
9 | // |
10 | |
11 | // |
12 | // |
13 | //========================================================================= |
14 | |
15 | #include "common.h" |
16 | #include "comdelegate.h" |
17 | #include "comthreadpool.h" |
18 | #include "threadpoolrequest.h" |
19 | #include "win32threadpool.h" |
20 | #include "class.h" |
21 | #include "object.h" |
22 | #include "field.h" |
23 | #include "excep.h" |
24 | #include "eeconfig.h" |
25 | #include "corhost.h" |
26 | #include "nativeoverlapped.h" |
27 | #include "appdomain.inl" |
28 | |
// Padding that keeps s_ADHint (below) on its own cache line, away from
// whatever the linker places before it.
BYTE PerAppDomainTPCountList::s_padding[MAX_CACHE_LINE_SIZE - sizeof(LONG)];
// Round-robin dispatch hint: 0-based slot in s_appDomainIndexList to try next.
// -1 points at the unmanaged TP count; it also covers the case where no
// appdomains have initialized yet.
// Cacheline aligned, hot variable
DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) LONG PerAppDomainTPCountList::s_ADHint = -1;

// Move out of the preceding variables' cache line
DECLSPEC_ALIGN(MAX_CACHE_LINE_SIZE) UnManagedPerAppDomainTPCount PerAppDomainTPCountList::s_unmanagedTPCount;
//The list of all per-appdomain work-request counts.
ArrayListStatic PerAppDomainTPCountList::s_appDomainIndexList;
38 | |
//---------------------------------------------------------------------------
//InitAppDomainIndexList: one-time initialization of the static list holding
//the per-appdomain thread pool counts. Must run before any appdomain adds an
//entry via AddNewTPIndex.
//
void PerAppDomainTPCountList::InitAppDomainIndexList()
{
    LIMITED_METHOD_CONTRACT;
    s_appDomainIndexList.Init();
}
44 | |
45 | |
46 | //--------------------------------------------------------------------------- |
47 | //AddNewTPIndex adds and returns a per-appdomain TP entry whenever a new appdomain |
48 | //is created. Our list count should be equal to the max number of appdomains created |
49 | //in the system. |
50 | // |
51 | //Assumptions: |
52 | //This function needs to be called under the SystemDomain lock. |
//The ArrayListStatic data structure allows traversing of the counts without a
54 | //lock, but addition to the list requires synchronization. |
55 | // |
TPIndex PerAppDomainTPCountList::AddNewTPIndex()
{
    STANDARD_VM_CONTRACT;

    DWORD count = s_appDomainIndexList.GetCount();
    DWORD i = FindFirstFreeTpEntry();

    // No recycled slot available: the new entry will be appended at the end.
    if (i == UNUSED_THREADPOOL_INDEX)
        i = count;

    // TPIndex values are 1-based: list slot i corresponds to TPIndex i+1.
    TPIndex index(i+1);
    if(count > i)
    {
        // Reuse the existing (currently unused) entry in place.
        IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(i));
        pAdCount->SetTPIndex(index);
        return index;
    }

#ifdef _MSC_VER
    // Disable this warning - we intentionally want __declspec(align()) to insert trailing padding for us
#pragma warning(disable:4316) // Object allocated on the heap may not be aligned for this type.
#endif
    ManagedPerAppDomainTPCount * pAdCount = new ManagedPerAppDomainTPCount(index);
#ifdef _MSC_VER
#pragma warning(default:4316) // Object allocated on the heap may not be aligned for this type.
#endif
    pAdCount->ResetState();

    // NOTE(review): if Append fails, IfFailThrow raises and pAdCount leaks.
    // A NewHolder around the allocation would fix this - confirm holder
    // support is available in this translation unit.
    IfFailThrow(s_appDomainIndexList.Append(pAdCount));

    return index;
}
89 | |
90 | DWORD PerAppDomainTPCountList::FindFirstFreeTpEntry() |
91 | { |
92 | CONTRACTL |
93 | { |
94 | NOTHROW; |
95 | MODE_ANY; |
96 | GC_NOTRIGGER; |
97 | } |
98 | CONTRACTL_END; |
99 | |
100 | DWORD DwnumADs = s_appDomainIndexList.GetCount(); |
101 | DWORD Dwi; |
102 | IPerAppDomainTPCount * pAdCount; |
103 | DWORD DwfreeIndex = UNUSED_THREADPOOL_INDEX; |
104 | |
105 | for (Dwi=0;Dwi < DwnumADs;Dwi++) |
106 | { |
107 | pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(Dwi)); |
108 | _ASSERTE(pAdCount); |
109 | |
110 | if(pAdCount->IsTPIndexUnused()) |
111 | { |
112 | DwfreeIndex = Dwi; |
113 | STRESS_LOG1(LF_THREADPOOL, LL_INFO1000, "FindFirstFreeTpEntry: reusing index %d\n" , DwfreeIndex + 1); |
114 | break; |
115 | } |
116 | } |
117 | |
118 | return DwfreeIndex; |
119 | } |
120 | |
121 | |
122 | void PerAppDomainTPCountList::SetAppDomainId(TPIndex index, ADID id) |
123 | { |
124 | CONTRACTL |
125 | { |
126 | NOTHROW; |
127 | MODE_ANY; |
128 | GC_TRIGGERS; |
129 | } |
130 | CONTRACTL_END; |
131 | |
132 | IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1)); |
133 | |
134 | //SetAppDomainID needs to be called after the PerDomainCount has been |
135 | //succesfully allocated for the appdomain. |
136 | _ASSERTE(pAdCount); |
137 | |
138 | STRESS_LOG2(LF_THREADPOOL, LL_INFO1000, "SetAppDomainId: index %d id %d\n" , index.m_dwIndex, id.m_dwId); |
139 | pAdCount->SetAppDomainId(id); |
140 | } |
141 | |
142 | //--------------------------------------------------------------------------- |
143 | //ResetAppDomainIndex: Resets the AppDomain ID and the per-appdomain |
144 | // thread pool counts |
145 | // |
146 | //Arguments: |
147 | //index - The index into the s_appDomainIndexList for the AppDomain we're |
148 | // trying to clear (the AD being unloaded) |
149 | // |
150 | //Assumptions: |
151 | //This function needs to be called from the AD unload thread after all domain |
152 | //bound objects have been finalized when it's safe to recycle the TPIndex. |
153 | //ClearAppDomainRequestsActive can be called from this function because no |
154 | // managed code is running (If managed code is running, this function needs |
155 | //to be called under a managed per-appdomain lock). |
156 | // |
157 | void PerAppDomainTPCountList::ResetAppDomainIndex(TPIndex index) |
158 | { |
159 | CONTRACTL |
160 | { |
161 | NOTHROW; |
162 | MODE_ANY; |
163 | GC_TRIGGERS; |
164 | } |
165 | CONTRACTL_END; |
166 | |
167 | IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1)); |
168 | _ASSERTE(pAdCount); |
169 | |
170 | STRESS_LOG2(LF_THREADPOOL, LL_INFO1000, "ResetAppDomainIndex: index %d pAdCount %p\n" , index.m_dwIndex, pAdCount); |
171 | |
172 | pAdCount->ResetState(); |
173 | pAdCount->SetTPIndexUnused(); |
174 | } |
175 | |
176 | //--------------------------------------------------------------------------- |
177 | //ResetAppDomainTPCounts: Resets the per-appdomain thread pool counts for a |
178 | // given AppDomain. Don't clear the ADID until we can |
179 | // safely recycle the TPIndex |
180 | // |
181 | //Arguments: |
182 | //index - The index into the s_appDomainIndexList for the AppDomain we're |
183 | // trying to clear |
184 | // |
185 | //Assumptions: |
186 | //This function needs to be called from the AD unload thread after we make sure |
187 | //that no more code is running in unmanaged code. ClearAppDomainRequestsActive |
188 | //can be called from this function because no managed code is running (If |
189 | //managed code is running, this function needs to be called under a managed |
190 | //per-appdomain lock). |
191 | // |
192 | void PerAppDomainTPCountList::ResetAppDomainTPCounts(TPIndex index) |
193 | { |
194 | CONTRACTL |
195 | { |
196 | NOTHROW; |
197 | MODE_ANY; |
198 | GC_TRIGGERS; |
199 | } |
200 | CONTRACTL_END; |
201 | |
202 | IPerAppDomainTPCount * pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(index.m_dwIndex-1)); |
203 | _ASSERTE(pAdCount); |
204 | |
205 | STRESS_LOG2(LF_THREADPOOL, LL_INFO1000, "ResetAppDomainTPCounts: index %d pAdCount %p\n" , index.m_dwIndex, pAdCount); |
206 | //Correct the thread pool counts, in case the appdomain was unloaded rudely. |
207 | if(pAdCount->IsRequestPending()) |
208 | { |
209 | ThreadpoolMgr::ClearAppDomainRequestsActive(FALSE, TRUE, (LONG)index.m_dwIndex); |
210 | } |
211 | |
212 | pAdCount->ClearAppDomainRequestsActive(TRUE); |
213 | } |
214 | |
215 | //--------------------------------------------------------------------------- |
//AreRequestsPendingInAnyAppDomains checks to see if there are any requests pending
217 | //in other appdomains. It also checks for pending unmanaged work requests. |
218 | //This function is called at end of thread quantum to see if the thread needs to |
219 | //transition into a different appdomain. This function may also be called by |
220 | //the scheduler to check for any unscheduled work. |
221 | // |
222 | bool PerAppDomainTPCountList::AreRequestsPendingInAnyAppDomains() |
223 | { |
224 | CONTRACTL |
225 | { |
226 | NOTHROW; |
227 | MODE_ANY; |
228 | GC_NOTRIGGER; |
229 | SO_TOLERANT; //Its ok for tis function to fail. |
230 | } |
231 | CONTRACTL_END; |
232 | |
233 | DWORD DwnumADs = s_appDomainIndexList.GetCount(); |
234 | DWORD Dwi; |
235 | IPerAppDomainTPCount * pAdCount; |
236 | bool fRequestsPending = false; |
237 | |
238 | for (Dwi=0;Dwi < DwnumADs;Dwi++) |
239 | { |
240 | |
241 | pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(Dwi)); |
242 | _ASSERTE(pAdCount); |
243 | |
244 | if(pAdCount->IsRequestPending()) |
245 | { |
246 | fRequestsPending = true; |
247 | break; |
248 | } |
249 | } |
250 | |
251 | if(s_unmanagedTPCount.IsRequestPending()) |
252 | { |
253 | fRequestsPending = true; |
254 | } |
255 | |
256 | return fRequestsPending; |
257 | } |
258 | |
259 | |
260 | //--------------------------------------------------------------------------- |
//GetAppDomainIndexForThreadpoolDispatch is essentially the
//"AppDomain Scheduler". This function makes fairness/policy decisions as to
//which appdomain the thread needs to enter to. This function needs to guarantee
//that all appdomain work requests are processed fairly. At this time all
//appdomain requests and the unmanaged work requests are treated with the same
//priority.
//
//Return Value:
//The (1-based) appdomain index in which to dispatch the worker thread, -1 if
//unmanaged work items need to be processed, or 0 if there is no work at all.
271 | // |
LONG PerAppDomainTPCountList::GetAppDomainIndexForThreadpoolDispatch()
{
    CONTRACTL
    {
        NOTHROW;
        MODE_ANY;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    LONG hint = s_ADHint;
    DWORD count = s_appDomainIndexList.GetCount();
    IPerAppDomainTPCount * pAdCount;
    DWORD Dwi;

    // Start with the hinted entry: a list slot when hint >= 0, otherwise the
    // unmanaged (non-appdomain) count.
    if (hint != -1)
    {
        pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(hint));
    }
    else
    {
        pAdCount = &s_unmanagedTPCount;
    }

    //temphint ensures that the check for appdomains proceeds in a pure round robin fashion.
    LONG temphint = hint;

    _ASSERTE( pAdCount);

    if (pAdCount->TakeActiveRequest())
        goto HintDone;

    //If there is no work in any appdomains, check the unmanaged queue,
    hint = -1;

    // Visit each list slot at most once, starting at the hint and wrapping.
    for (Dwi=0;Dwi<count;Dwi++)
    {
        if (temphint == -1)
        {
            temphint = 0;
        }

        pAdCount = dac_cast<PTR_IPerAppDomainTPCount>(s_appDomainIndexList.Get(temphint));
        if (pAdCount->TakeActiveRequest())
        {
            hint = temphint;
            goto HintDone;
        }

        temphint++;

        _ASSERTE( temphint <= (LONG)count);

        if(temphint == (LONG)count)
        {
            temphint = 0;
        }
    }

    // No appdomain had work; try to claim one unmanaged request instead.
    if (hint == -1 && !s_unmanagedTPCount.TakeActiveRequest())
    {
        //no work!
        return 0;
    }

HintDone:

    // Advance the shared hint so the next dispatch starts at the next slot;
    // -1 routes the next dispatch through the unmanaged count first.
    if((hint+1) < (LONG)count)
    {
        s_ADHint = hint+1;
    }
    else
    {
        s_ADHint = -1;
    }

    // Translate the 0-based list slot into the 1-based TPIndex convention;
    // -1 means the unmanaged queue was chosen.
    if (hint == -1)
    {
        return hint;
    }
    else
    {
        return (hint+1);
    }
}
358 | |
359 | |
//---------------------------------------------------------------------------
//SetAppDomainRequestsActive: signals that unmanaged work is pending by
//incrementing the outstanding-request count (capped at the number of
//processors) and making sure a worker and the gate thread are running.
//
void UnManagedPerAppDomainTPCount::SetAppDomainRequestsActive()
{
    WRAPPER_NO_CONTRACT;
#ifndef DACCESS_COMPILE
    // Lock-free CAS increment; counts beyond NumberOfProcessors are not
    // tracked (the loop simply exits without incrementing).
    LONG count = VolatileLoad(&m_outstandingThreadRequestCount);
    while (count < (LONG)ThreadpoolMgr::NumberOfProcessors)
    {
        LONG prevCount = FastInterlockCompareExchange(&m_outstandingThreadRequestCount, count+1, count);
        if (prevCount == count)
        {
            // We won the CAS: wake up the machinery that services requests.
            ThreadpoolMgr::MaybeAddWorkingWorker();
            ThreadpoolMgr::EnsureGateThreadRunning();
            break;
        }
        // Another thread changed the count; retry with the value it left.
        count = prevCount;
    }
#endif
}
378 | |
//---------------------------------------------------------------------------
//TakeActiveRequest: atomically claims one outstanding unmanaged work request.
//Returns true when a request was claimed (count decremented), false when the
//count was already zero.
//
bool FORCEINLINE UnManagedPerAppDomainTPCount::TakeActiveRequest()
{
    LIMITED_METHOD_CONTRACT;
    LONG count = VolatileLoad(&m_outstandingThreadRequestCount);

    while (count > 0)
    {
        // CAS loop: only decrement if no other thread raced us to it.
        LONG prevCount = FastInterlockCompareExchange(&m_outstandingThreadRequestCount, count-1, count);
        if (prevCount == count)
            return true;
        count = prevCount;
    }

    return false;
}
394 | |
395 | |
// Returns a WorkRequest to the ThreadpoolMgr's memory cache (not the heap).
FORCEINLINE void ReleaseWorkRequest(WorkRequest *workRequest) { ThreadpoolMgr::RecycleMemory( workRequest, ThreadpoolMgr::MEMTYPE_WorkRequest ); }
// Holder that recycles the WorkRequest on scope exit unless SuppressRelease() is called.
typedef Wrapper< WorkRequest *, DoNothing<WorkRequest *>, ReleaseWorkRequest > WorkRequestHolder;
398 | |
//---------------------------------------------------------------------------
//QueueUnmanagedWorkRequest: allocates a WorkRequest wrapping the given
//unmanaged callback and context, enqueues it on the thread pool's shared
//work-request queue, and signals that unmanaged work is pending.
//
//Arguments:
//function - unmanaged callback to run on a thread pool worker
//context  - opaque pointer handed to the callback
//
void UnManagedPerAppDomainTPCount::QueueUnmanagedWorkRequest(LPTHREAD_START_ROUTINE function, PVOID context)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;;

#ifndef DACCESS_COMPILE
    // Holder recycles the request if anything throws before it is enqueued.
    WorkRequestHolder pWorkRequest;

    //Note, ideally we would want to use our own queues instead of those in
    //the thread pool class. However, the queues in the thread pool class have
    //caching support, that shares memory with other commonly used structures
    //in the VM thread pool implementation. So, we decided to leverage those.

    pWorkRequest = ThreadpoolMgr::MakeWorkRequest(function, context);

    //MakeWorkRequest should throw if unable to allocate memory
    _ASSERTE(pWorkRequest != NULL);
    PREFIX_ASSUME(pWorkRequest != NULL);

    if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolEnqueue) &&
        !ThreadpoolMgr::AreEtwQueueEventsSpeciallyHandled(function))
        FireEtwThreadPoolEnqueue(pWorkRequest, GetClrInstanceId());

    // NOTE(review): Init runs on every enqueue (and again in DispatchWorkItem);
    // presumably SpinLock::Init is idempotent and safe while another thread
    // holds the lock - confirm, otherwise this is a re-initialization race.
    m_lock.Init(LOCK_TYPE_DEFAULT);

    {
        SpinLock::Holder slh(&m_lock);

        // Enqueue and transfer ownership to the queue; from here the worker
        // that dequeues the request is responsible for recycling it.
        ThreadpoolMgr::EnqueueWorkRequest(pWorkRequest);
        pWorkRequest.SuppressRelease();
        m_NumRequests++;
    }

    // Bump the outstanding-request count and wake a worker/gate thread.
    SetAppDomainRequestsActive();
#endif //DACCESS_COMPILE
}
440 | |
441 | PVOID UnManagedPerAppDomainTPCount::DeQueueUnManagedWorkRequest(bool* lastOne) |
442 | { |
443 | CONTRACTL |
444 | { |
445 | NOTHROW; |
446 | GC_TRIGGERS; |
447 | MODE_ANY; |
448 | } |
449 | CONTRACTL_END;; |
450 | |
451 | *lastOne = true; |
452 | |
453 | WorkRequest * pWorkRequest = ThreadpoolMgr::DequeueWorkRequest(); |
454 | |
455 | if (pWorkRequest) |
456 | { |
457 | m_NumRequests--; |
458 | |
459 | if(m_NumRequests > 0) |
460 | *lastOne = false; |
461 | } |
462 | |
463 | return (PVOID) pWorkRequest; |
464 | } |
465 | |
466 | //--------------------------------------------------------------------------- |
467 | //DispatchWorkItem manages dispatching of unmanaged work requests. It keeps |
468 | //processing unmanaged requests for the "Quanta". Essentially this function is |
469 | //a tight loop of dequeueing unmanaged work requests and dispatching them. |
470 | // |
void UnManagedPerAppDomainTPCount::DispatchWorkItem(bool* foundWork, bool* wasNotRecalled)
{
#ifndef DACCESS_COMPILE
    *foundWork = false;
    *wasNotRecalled = true;

    // Worker tracking wraps each callback with thread-status reports (diagnostics).
    bool enableWorkerTracking = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ThreadPool_EnableWorkerTracking) ? true : false;

    DWORD startTime;
    DWORD endTime;

    startTime = GetTickCount();

    //For all practical purposes, the unmanaged part of thread pool is treated
    //as a special appdomain for thread pool purposes. The same logic as the
    //one in managed code for dispatching thread pool requests is repeated here.
    //Namely we continue to process requests until either there are none, or
    //the "Quanta has expired". See threadpool.cs for the managed counterpart.

    WorkRequest * pWorkRequest=NULL;
    LPTHREAD_START_ROUTINE wrFunction;
    LPVOID wrContext;

    bool firstIteration = true;
    bool lastOne = false;

    while (*wasNotRecalled)
    {
        // NOTE(review): the lock is re-initialized on every iteration (and in
        // QueueUnmanagedWorkRequest) - presumably SpinLock::Init is safe to
        // re-run while the lock is live; confirm.
        m_lock.Init(LOCK_TYPE_DEFAULT);
        {
            SpinLock::Holder slh(&m_lock);
            pWorkRequest = (WorkRequest*) DeQueueUnManagedWorkRequest(&lastOne);
        }

        // Queue drained: pWorkRequest stays NULL, so the re-activation at the
        // bottom of the function is skipped as well.
        if (NULL == pWorkRequest)
            break;

        // More work remains beyond the request we just took; re-signal so
        // other workers keep getting dispatched while we run this one.
        if (firstIteration && !lastOne)
            SetAppDomainRequestsActive();

        firstIteration = false;
        *foundWork = true;

        if (GCHeapUtilities::IsGCInProgress(TRUE))
        {
            // GC is imminent, so wait until GC is complete before executing next request.
            // this reduces in-flight objects allocated right before GC, easing the GC's work
            GCHeapUtilities::WaitForGCCompletion(TRUE);
        }

        PREFIX_ASSUME(pWorkRequest != NULL);
        _ASSERTE(pWorkRequest);

        // Capture the callback and context before recycling the request object.
        wrFunction = pWorkRequest->Function;
        wrContext = pWorkRequest->Context;

        if (ETW_EVENT_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context, ThreadPoolDequeue) &&
            !ThreadpoolMgr::AreEtwQueueEventsSpeciallyHandled(wrFunction))
            FireEtwThreadPoolDequeue(pWorkRequest, GetClrInstanceId());

        // Recycle the request before running the callback. pWorkRequest is now
        // dangling but deliberately left non-NULL: the exit path below uses it
        // to tell "stopped early" apart from "queue drained".
        ThreadpoolMgr::FreeWorkRequest(pWorkRequest);

        if (enableWorkerTracking)
        {
            ThreadpoolMgr::ReportThreadStatus(true);
            (wrFunction) (wrContext);
            ThreadpoolMgr::ReportThreadStatus(false);
        }
        else
        {
            (wrFunction) (wrContext);
        }

        ThreadpoolMgr::NotifyWorkItemCompleted();
        if (ThreadpoolMgr::ShouldAdjustMaxWorkersActive())
        {
            DangerousNonHostedSpinLockTryHolder tal(&ThreadpoolMgr::ThreadAdjustmentLock);
            if (tal.Acquired())
            {
                ThreadpoolMgr::AdjustMaxWorkersActive();
            }
            else
            {
                // the lock is held by someone else, so they will take care of this for us.
            }
        }
        *wasNotRecalled = ThreadpoolMgr::ShouldWorkerKeepRunning();

        // Clear any thread state (e.g. a pending abort) the callback left behind.
        Thread *pThread = GetThread();
        if (pThread)
        {
            if (pThread->IsAbortRequested())
            {
                pThread->EEResetAbort(Thread::TAR_ALL);
            }
            pThread->InternalReset();
        }

        endTime = GetTickCount();

        // Yield this worker once the dispatch quantum has been used up.
        if ((endTime - startTime) >= TP_QUANTUM)
        {
            break;
        }
    }

    // if we're exiting for any reason other than the queue being empty, then we need to make sure another thread
    // will visit us later.
    if (NULL != pWorkRequest)
    {
        SetAppDomainRequestsActive();
    }

#endif //DACCESS_COMPILE
}
586 | |
587 | |
void ManagedPerAppDomainTPCount::SetAppDomainRequestsActive()
{
    //This function should either be called by managed code or during AD unload, but before
    //the TpIndex is set to unused.
    //
    // Note that there is a separate count in managed code that stays in sync with this one over time.
    // The managed count is incremented before this one, and this one is decremented before the managed
    // one.
    //

    _ASSERTE(m_index.m_dwIndex != UNUSED_THREADPOOL_INDEX);
    _ASSERTE(m_id.m_dwId != 0);

#ifndef DACCESS_COMPILE
    // Lock-free CAS increment. ADUnloading (-1) is a sentinel meaning the
    // domain is going away; in that case no new requests may be recorded.
    LONG count = VolatileLoad(&m_numRequestsPending);
    while (count != ADUnloading)
    {
        LONG prev = FastInterlockCompareExchange(&m_numRequestsPending, count+1, count);
        if (prev == count)
        {
            // We won the CAS: wake up the machinery that services requests.
            ThreadpoolMgr::MaybeAddWorkingWorker();
            ThreadpoolMgr::EnsureGateThreadRunning();
            break;
        }
        count = prev;
    }
#endif
}
616 | |
//---------------------------------------------------------------------------
//ClearAppDomainRequestsActive: zeroes the pending-request count for this
//appdomain. When called for AD unload (bADU == TRUE) the count is parked at
//the ADUnloading sentinel so SetAppDomainRequestsActive cannot add more.
//
void ManagedPerAppDomainTPCount::ClearAppDomainRequestsActive(BOOL bADU)
{
    LIMITED_METHOD_CONTRACT;
    //This function should either be called by managed code or during AD unload, but before
    //the TpIndex is set to unused.

    _ASSERTE(m_index.m_dwIndex != UNUSED_THREADPOOL_INDEX);
    _ASSERTE(m_id.m_dwId != 0);

    if (bADU)
    {
        VolatileStore(&m_numRequestsPending, ADUnloading);
    }
    else
    {
        LONG count = VolatileLoad(&m_numRequestsPending);
        // Test is: count > 0 && count != ADUnloading
        // Since: const ADUnloading == -1
        // Both are tested: (count > 0) means following also true (count != ADUnloading)
        while (count > 0)
        {
            LONG prev = FastInterlockCompareExchange(&m_numRequestsPending, 0, count);
            if (prev == count)
                break;
            count = prev;
        }
    }
}
645 | |
//---------------------------------------------------------------------------
//TakeActiveRequest: atomically claims one pending request for this appdomain.
//Returns true when a request was claimed; false when none were pending or the
//domain is unloading (count parked at ADUnloading).
//
bool ManagedPerAppDomainTPCount::TakeActiveRequest()
{
    LIMITED_METHOD_CONTRACT;
    LONG count = VolatileLoad(&m_numRequestsPending);
    // Test is: count > 0 && count != ADUnloading
    // Since: const ADUnloading == -1
    // Both are tested: (count > 0) means following also true (count != ADUnloading)
    while (count > 0)
    {
        LONG prev = FastInterlockCompareExchange(&m_numRequestsPending, count-1, count);
        if (prev == count)
            return true;
        count = prev;
    }
    return false;
}
662 | |
//---------------------------------------------------------------------------
//ClearAppDomainUnloading: takes the count out of the ADUnloading state and
//primes it with a conservative pending-request estimate.
//
void ManagedPerAppDomainTPCount::ClearAppDomainUnloading()
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

#ifndef DACCESS_COMPILE
    //
    // While the AD was trying to unload, we may have queued some work. We would not
    // have added that work to this count, because the AD was unloading. So we assume
    // here that we have work to do.
    //
    // We set this to NumberOfProcessors because that's the maximum count that the AD
    // might have tried to add. It's OK for this count to be larger than the AD thinks
    // it should be, but if it's smaller then we will be permanently out of sync with the
    // AD.
    //
    VolatileStore(&m_numRequestsPending, (LONG)ThreadpoolMgr::NumberOfProcessors);
    if (ThreadpoolMgr::IsInitialized())
    {
        // Kick the pool so the assumed work actually gets serviced.
        ThreadpoolMgr::MaybeAddWorkingWorker();
        ThreadpoolMgr::EnsureGateThreadRunning();
    }
#endif
}
692 | |
693 | |
694 | #ifndef DACCESS_COMPILE |
695 | |
696 | //--------------------------------------------------------------------------- |
697 | //DispatchWorkItem makes sure the right exception handling frames are setup, |
698 | //the thread is transitioned into the correct appdomain, and the right managed |
699 | //callback is called. |
700 | // |
void ManagedPerAppDomainTPCount::DispatchWorkItem(bool* foundWork, bool* wasNotRecalled)
{
    *foundWork = false;
    *wasNotRecalled = true;

    HRESULT hr;
    Thread * pThread = GetThread();
    if (pThread == NULL)
    {
        // New OS thread with no Thread object yet: tag it as a thread pool
        // worker and set one up without throwing; bail if that fails.
        ClrFlsSetThreadType(ThreadType_Threadpool_Worker);
        pThread = SetupThreadNoThrow(&hr);
        if (pThread == NULL)
        {
            return;
        }
    }

    //We are in a state where AppDomain Unload has begun, but not all threads have been
    //forced out of the unloading domain. This check below will prevent us from getting
    //unmanaged AD unloaded exceptions while trying to enter an unloaded appdomain.
    if (!IsAppDomainUnloading())
    {
        CONTRACTL
        {
            MODE_PREEMPTIVE;
            THROWS;
            GC_TRIGGERS;
        }
        CONTRACTL_END;

        GCX_COOP();
        BEGIN_SO_INTOLERANT_CODE(pThread);

        //
        // NOTE: there is a potential race between the time we retrieve the app
        // domain pointer, and the time which this thread enters the domain.
        //
        // To solve the race, we rely on the fact that there is a thread sync (via
        // GC) between releasing an app domain's handle, and destroying the
        // app domain. Thus it is important that we not go into preemptive gc mode
        // in that window.
        //

        {
            ADID appDomainId(m_id);

            // This TPIndex may have been recycled since we chose it for workitem dispatch.
            // Thus it's possible for the ADID we just read to refer to an AppDomain that's still
            // being created. If so, the new AppDomain will necessarily have zero requests
            // pending (because the destruction of the previous AD that used this TPIndex
            // will have reset this object). We don't want to call into such an AppDomain.
            // TODO: fix this another way!
            // if (IsRequestPending())
            {
                // Transition into the target domain and run the managed
                // thread pool dispatch callback there.
                ManagedThreadBase::ThreadPool(appDomainId, QueueUserWorkItemManagedCallback, wasNotRecalled);
            }

            if (pThread->IsAbortRequested())
            {
                // thread was aborted, and may not have had a chance to tell us it has work.
                // Re-signal and queue a no-op item so the domain gets revisited.
                ENTER_DOMAIN_ID(m_id)
                {
                    ThreadpoolMgr::SetAppDomainRequestsActive();
                    ThreadpoolMgr::QueueUserWorkItem(NULL,
                                                     NULL,
                                                     0,
                                                     FALSE);

                }
                END_DOMAIN_TRANSITION;
            }
        }

        // We should have released all locks.
        _ASSERTE(g_fEEShutDown || pThread->m_dwLockCount == 0 || pThread->m_fRudeAborted);

        END_SO_INTOLERANT_CODE;

        *foundWork = true;
    }
    else
    {
        // Domain is unloading: don't enter it. Yield briefly so the unload
        // can make progress; foundWork stays false.
        __SwitchToThread(0, CALLER_LIMITS_SPINNING);
        return;
    }
}
787 | |
788 | #endif // !DACCESS_COMPILE |
789 | |