1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | |
6 | #include "common.h" |
7 | #include "stringliteralmap.h" |
8 | #include "virtualcallstub.h" |
9 | #include "threadsuspend.h" |
10 | #ifndef DACCESS_COMPILE |
11 | #include "comdelegate.h" |
12 | #endif |
13 | #include "comcallablewrapper.h" |
14 | |
15 | //***************************************************************************** |
16 | // Used by LoaderAllocator::Init for easier readability. |
17 | #ifdef ENABLE_PERF_COUNTERS |
18 | #define LOADERHEAP_PROFILE_COUNTER (&(GetPerfCounters().m_Loading.cbLoaderHeapSize)) |
19 | #else |
20 | #define LOADERHEAP_PROFILE_COUNTER (NULL) |
21 | #endif |
22 | |
23 | #ifndef CROSSGEN_COMPILE |
24 | #define STUBMANAGER_RANGELIST(stubManager) (stubManager::g_pManager->GetRangeList()) |
25 | #else |
26 | #define STUBMANAGER_RANGELIST(stubManager) (NULL) |
27 | #endif |
28 | |
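// Running count of LoaderAllocator instances created in this process; the
// constructor uses it to assign each allocator a unique, monotonically increasing id.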
29 | UINT64 LoaderAllocator::cLoaderAllocatorsCreated = 1; |
30 | |
31 | LoaderAllocator::LoaderAllocator() |
32 | { |
33 | LIMITED_METHOD_CONTRACT; |
34 | |
    // Initialize all members up front to NULL so that a short-circuited initialization failure won't leave invalid values behind
36 | m_InitialReservedMemForLoaderHeaps = NULL; |
37 | m_pLowFrequencyHeap = NULL; |
38 | m_pHighFrequencyHeap = NULL; |
39 | m_pStubHeap = NULL; |
40 | m_pPrecodeHeap = NULL; |
41 | m_pExecutableHeap = NULL; |
42 | #ifdef FEATURE_READYTORUN |
43 | m_pDynamicHelpersHeap = NULL; |
44 | #endif |
45 | m_pFuncPtrStubs = NULL; |
46 | m_hLoaderAllocatorObjectHandle = NULL; |
47 | m_pStringLiteralMap = NULL; |
48 | |
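    // (UINT32)-1 marks an allocator that has not been activated yet;
    // ActivateManagedTracking replaces it with a real reference count of 1.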
49 | m_cReferences = (UINT32)-1; |
50 | |
51 | m_pFirstDomainAssemblyFromSameALCToDelete = NULL; |
52 | |
53 | #ifdef FAT_DISPATCH_TOKENS |
54 | // DispatchTokenFat pointer table for token overflow scenarios. Lazily allocated. |
55 | m_pFatTokenSetLock = NULL; |
56 | m_pFatTokenSet = NULL; |
57 | #endif |
58 | |
59 | #ifndef CROSSGEN_COMPILE |
60 | m_pVirtualCallStubManager = NULL; |
61 | #endif |
62 | |
63 | m_fGCPressure = false; |
64 | m_fTerminated = false; |
65 | m_fUnloaded = false; |
66 | m_fMarked = false; |
67 | m_pLoaderAllocatorDestroyNext = NULL; |
68 | m_pDomain = NULL; |
69 | m_pCodeHeapInitialAlloc = NULL; |
70 | m_pVSDHeapInitialAlloc = NULL; |
71 | m_pLastUsedCodeHeap = NULL; |
72 | m_pLastUsedDynamicCodeHeap = NULL; |
73 | m_pJumpStubCache = NULL; |
74 | m_IsCollectible = false; |
75 | |
76 | #ifdef FEATURE_COMINTEROP |
77 | m_pComCallWrapperCache = NULL; |
78 | #endif |
79 | |
80 | m_pUMEntryThunkCache = NULL; |
81 | |
82 | m_nLoaderAllocator = InterlockedIncrement64((LONGLONG *)&LoaderAllocator::cLoaderAllocatorsCreated); |
83 | } |
84 | |
85 | LoaderAllocator::~LoaderAllocator() |
86 | { |
87 | CONTRACTL |
88 | { |
89 | DESTRUCTOR_CHECK; |
90 | } |
91 | CONTRACTL_END; |
92 | #if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE) |
93 | Terminate(); |
94 | |
95 | // Assert that VSD is not still active when the destructor is called. |
96 | _ASSERTE(m_pVirtualCallStubManager == NULL); |
97 | |
98 | // Code manager is responsible for cleaning up. |
99 | _ASSERTE(m_pJumpStubCache == NULL); |
100 | #endif |
101 | } |
102 | |
103 | #ifndef DACCESS_COMPILE |
104 | //--------------------------------------------------------------------------------------- |
105 | // |
106 | void LoaderAllocator::AddReference() |
107 | { |
108 | CONTRACTL |
109 | { |
110 | NOTHROW; |
111 | GC_NOTRIGGER; |
112 | MODE_ANY; |
113 | } |
114 | CONTRACTL_END; |
115 | |
116 | _ASSERTE((m_cReferences > (UINT32)0) && (m_cReferences != (UINT32)-1)); |
117 | FastInterlockIncrement((LONG *)&m_cReferences); |
118 | } |
119 | #endif //!DACCESS_COMPILE |
120 | |
121 | //--------------------------------------------------------------------------------------- |
122 | // |
123 | // Adds reference if the native object is alive - code:LoaderAllocator#AssemblyPhases. |
124 | // Returns TRUE if the reference was added. |
125 | // |
126 | BOOL LoaderAllocator::AddReferenceIfAlive() |
127 | { |
128 | CONTRACTL |
129 | { |
130 | NOTHROW; |
131 | GC_NOTRIGGER; |
132 | MODE_ANY; |
133 | } |
134 | CONTRACTL_END; |
135 | |
136 | #ifndef DACCESS_COMPILE |
137 | for (;;) |
138 | { |
        // Local snapshot of the ref-count
140 | UINT32 cReferencesLocalSnapshot = m_cReferences; |
141 | _ASSERTE(cReferencesLocalSnapshot != (UINT32)-1); |
142 | |
143 | if (cReferencesLocalSnapshot == 0) |
144 | { // Ref-count was 0, do not AddRef |
145 | return FALSE; |
146 | } |
147 | |
148 | UINT32 cOriginalReferences = FastInterlockCompareExchange( |
149 | (LONG *)&m_cReferences, |
150 | cReferencesLocalSnapshot + 1, |
151 | cReferencesLocalSnapshot); |
152 | |
153 | if (cOriginalReferences == cReferencesLocalSnapshot) |
154 | { // The exchange happened |
155 | return TRUE; |
156 | } |
        // Another thread changed the value concurrently; spin until our compare-exchange succeeds
158 | } |
159 | #else //DACCESS_COMPILE |
160 | // DAC won't AddRef |
161 | return IsAlive(); |
162 | #endif //DACCESS_COMPILE |
163 | } // LoaderAllocator::AddReferenceIfAlive |
164 | |
165 | //--------------------------------------------------------------------------------------- |
166 | // |
167 | BOOL LoaderAllocator::Release() |
168 | { |
169 | CONTRACTL |
170 | { |
171 | NOTHROW; |
172 | GC_NOTRIGGER; |
173 | MODE_ANY; |
174 | } |
175 | CONTRACTL_END; |
176 | |
    // Only actually destroy the domain assembly when all references to it are gone.
    // This preserves debugger behavior: an UnloadModule event occurs before the
    // underlying data structures cease functioning.
180 | #ifndef DACCESS_COMPILE |
181 | |
182 | _ASSERTE((m_cReferences > (UINT32)0) && (m_cReferences != (UINT32)-1)); |
183 | LONG cNewReferences = FastInterlockDecrement((LONG *)&m_cReferences); |
184 | return (cNewReferences == 0); |
185 | #else //DACCESS_COMPILE |
186 | |
187 | return (m_cReferences == (UINT32)0); |
188 | #endif //DACCESS_COMPILE |
189 | } // LoaderAllocator::Release |
190 | |
191 | #ifndef DACCESS_COMPILE |
192 | #ifndef CROSSGEN_COMPILE |
193 | //--------------------------------------------------------------------------------------- |
194 | // |
195 | BOOL LoaderAllocator::CheckAddReference_Unlocked(LoaderAllocator *pOtherLA) |
196 | { |
197 | CONTRACTL |
198 | { |
199 | THROWS; |
200 | SO_INTOLERANT; |
201 | MODE_ANY; |
202 | } |
203 | CONTRACTL_END; |
204 | |
205 | // This must be checked before calling this function |
206 | _ASSERTE(pOtherLA != this); |
207 | |
    // This function requires that the loader allocator lock has been taken.
209 | _ASSERTE(GetDomain()->GetLoaderAllocatorReferencesLock()->OwnedByCurrentThread()); |
210 | |
211 | if (m_LoaderAllocatorReferences.Lookup(pOtherLA) == NULL) |
212 | { |
213 | GCX_COOP(); |
214 | // Build a managed reference to keep the target object live |
215 | AllocateHandle(pOtherLA->GetExposedObject()); |
216 | |
217 | // Keep track of the references that have already been made |
218 | m_LoaderAllocatorReferences.Add(pOtherLA); |
219 | |
220 | // Notify the other LoaderAllocator that a reference exists |
221 | pOtherLA->AddReference(); |
222 | return TRUE; |
223 | } |
224 | |
225 | return FALSE; |
226 | } |
227 | |
228 | //--------------------------------------------------------------------------------------- |
229 | // |
230 | BOOL LoaderAllocator::EnsureReference(LoaderAllocator *pOtherLA) |
231 | { |
232 | CONTRACTL |
233 | { |
234 | THROWS; |
235 | SO_INTOLERANT; |
236 | MODE_ANY; |
237 | } |
238 | CONTRACTL_END; |
239 | |
    // Check that this lock can be taken in all places from which this function is called
241 | _ASSERTE(GetDomain()->GetLoaderAllocatorReferencesLock()->Debug_CanTake()); |
242 | |
243 | if (!IsCollectible()) |
244 | return FALSE; |
245 | |
246 | if (this == pOtherLA) |
247 | return FALSE; |
248 | |
249 | if (!pOtherLA->IsCollectible()) |
250 | return FALSE; |
251 | |
252 | CrstHolder ch(GetDomain()->GetLoaderAllocatorReferencesLock()); |
253 | return CheckAddReference_Unlocked(pOtherLA); |
254 | } |
255 | |
256 | BOOL LoaderAllocator::EnsureInstantiation(Module *pDefiningModule, Instantiation inst) |
257 | { |
258 | CONTRACTL |
259 | { |
260 | THROWS; |
261 | SO_INTOLERANT; |
262 | MODE_ANY; |
263 | } |
264 | CONTRACTL_END; |
265 | |
266 | BOOL fNewReferenceNeeded = FALSE; |
267 | |
    // Check that this lock can be taken in all places from which this function is called
269 | _ASSERTE(GetDomain()->GetLoaderAllocatorReferencesLock()->Debug_CanTake()); |
270 | |
271 | if (!IsCollectible()) |
272 | return FALSE; |
273 | |
274 | CrstHolder ch(GetDomain()->GetLoaderAllocatorReferencesLock()); |
275 | |
276 | if (pDefiningModule != NULL) |
277 | { |
278 | LoaderAllocator *pDefiningLoaderAllocator = pDefiningModule->GetLoaderAllocator(); |
279 | if (pDefiningLoaderAllocator->IsCollectible()) |
280 | { |
281 | if (pDefiningLoaderAllocator != this) |
282 | { |
283 | fNewReferenceNeeded = CheckAddReference_Unlocked(pDefiningLoaderAllocator) || fNewReferenceNeeded; |
284 | } |
285 | } |
286 | } |
287 | |
288 | for (DWORD i = 0; i < inst.GetNumArgs(); i++) |
289 | { |
290 | TypeHandle arg = inst[i]; |
291 | _ASSERTE(!arg.IsEncodedFixup()); |
292 | LoaderAllocator *pOtherLA = arg.GetLoaderModule()->GetLoaderAllocator(); |
293 | |
294 | if (pOtherLA == this) |
295 | continue; |
296 | |
297 | if (!pOtherLA->IsCollectible()) |
298 | continue; |
299 | |
300 | fNewReferenceNeeded = CheckAddReference_Unlocked(pOtherLA) || fNewReferenceNeeded; |
301 | } |
302 | |
303 | return fNewReferenceNeeded; |
304 | } |
305 | #else // CROSSGEN_COMPILE |
306 | BOOL LoaderAllocator::EnsureReference(LoaderAllocator *pOtherLA) |
307 | { |
308 | return FALSE; |
309 | } |
310 | |
311 | BOOL LoaderAllocator::EnsureInstantiation(Module *pDefiningModule, Instantiation inst) |
312 | { |
313 | return FALSE; |
314 | } |
315 | #endif // !CROSSGEN_COMPILE |
316 | |
317 | #ifndef CROSSGEN_COMPILE |
318 | bool LoaderAllocator::Marked() |
319 | { |
320 | LIMITED_METHOD_CONTRACT; |
321 | return m_fMarked; |
322 | } |
323 | |
324 | void LoaderAllocator::ClearMark() |
325 | { |
326 | LIMITED_METHOD_CONTRACT; |
327 | m_fMarked = false; |
328 | } |
329 | |
330 | void LoaderAllocator::Mark() |
331 | { |
332 | WRAPPER_NO_CONTRACT; |
333 | |
334 | if (!m_fMarked) |
335 | { |
336 | m_fMarked = true; |
337 | |
338 | LoaderAllocatorSet::Iterator iter = m_LoaderAllocatorReferences.Begin(); |
339 | while (iter != m_LoaderAllocatorReferences.End()) |
340 | { |
341 | LoaderAllocator *pAllocator = *iter; |
342 | pAllocator->Mark(); |
343 | iter++; |
344 | } |
345 | } |
346 | } |
347 | |
348 | //--------------------------------------------------------------------------------------- |
349 | // |
350 | // Collect unreferenced assemblies, remove them from the assembly list and return their loader allocator |
351 | // list. |
352 | // |
353 | //static |
354 | LoaderAllocator * LoaderAllocator::GCLoaderAllocators_RemoveAssemblies(AppDomain * pAppDomain) |
355 | { |
356 | CONTRACTL |
357 | { |
358 | THROWS; |
359 | GC_TRIGGERS; |
360 | MODE_PREEMPTIVE; |
361 | SO_INTOLERANT; |
362 | } |
363 | CONTRACTL_END; |
364 | // List of LoaderAllocators being deleted |
365 | LoaderAllocator * pFirstDestroyedLoaderAllocator = NULL; |
366 | |
367 | #if 0 |
368 | // Debug logic for debugging the loader allocator gc. |
369 | { |
370 | /* Iterate through every loader allocator, and print its current state */ |
371 | AppDomain::AssemblyIterator iData; |
372 | iData = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)( |
373 | kIncludeExecution | kIncludeLoaded | kIncludeCollected)); |
374 | CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; |
375 | |
376 | while (iData.Next_Unlocked(pDomainAssembly.This())) |
377 | { |
378 | // The assembly could be collected (ref-count = 0), do not use holder which calls add-ref |
379 | Assembly * pAssembly = pDomainAssembly->GetLoadedAssembly(); |
380 | |
381 | if (pAssembly != NULL) |
382 | { |
383 | LoaderAllocator * pLoaderAllocator = pAssembly->GetLoaderAllocator(); |
384 | if (pLoaderAllocator->IsCollectible()) |
385 | { |
386 | printf("LA %p ReferencesTo %d\n" , pLoaderAllocator, pLoaderAllocator->m_cReferences); |
387 | LoaderAllocatorSet::Iterator iter = pLoaderAllocator->m_LoaderAllocatorReferences.Begin(); |
388 | while (iter != pLoaderAllocator->m_LoaderAllocatorReferences.End()) |
389 | { |
390 | LoaderAllocator * pAllocator = *iter; |
391 | printf("LARefTo: %p\n" , pAllocator); |
392 | iter++; |
393 | } |
394 | } |
395 | } |
396 | } |
397 | } |
398 | #endif //0 |
399 | |
400 | AppDomain::AssemblyIterator i; |
401 | // Iterate through every loader allocator, marking as we go |
402 | { |
403 | CrstHolder chAssemblyListLock(pAppDomain->GetAssemblyListLock()); |
404 | |
405 | i = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)( |
406 | kIncludeExecution | kIncludeLoaded | kIncludeCollected)); |
407 | CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; |
408 | |
409 | while (i.Next_Unlocked(pDomainAssembly.This())) |
410 | { |
411 | // The assembly could be collected (ref-count = 0), do not use holder which calls add-ref |
412 | Assembly * pAssembly = pDomainAssembly->GetLoadedAssembly(); |
413 | |
414 | if (pAssembly != NULL) |
415 | { |
416 | LoaderAllocator * pLoaderAllocator = pAssembly->GetLoaderAllocator(); |
417 | if (pLoaderAllocator->IsCollectible()) |
418 | { |
419 | if (pLoaderAllocator->IsAlive()) |
420 | pLoaderAllocator->Mark(); |
421 | } |
422 | } |
423 | } |
424 | } |
425 | |
    // Iterate through every loader allocator, clearing the mark on marked LoaderAllocators,
    // and build a free list of the unmarked ones
428 | { |
429 | CrstHolder chLoaderAllocatorReferencesLock(pAppDomain->GetLoaderAllocatorReferencesLock()); |
430 | CrstHolder chAssemblyListLock(pAppDomain->GetAssemblyListLock()); |
431 | |
432 | i = pAppDomain->IterateAssembliesEx((AssemblyIterationFlags)( |
433 | kIncludeExecution | kIncludeLoaded | kIncludeCollected)); |
434 | CollectibleAssemblyHolder<DomainAssembly *> pDomainAssembly; |
435 | |
436 | while (i.Next_Unlocked(pDomainAssembly.This())) |
437 | { |
438 | // The assembly could be collected (ref-count = 0), do not use holder which calls add-ref |
439 | Assembly * pAssembly = pDomainAssembly->GetLoadedAssembly(); |
440 | |
441 | if (pAssembly != NULL) |
442 | { |
443 | LoaderAllocator * pLoaderAllocator = pAssembly->GetLoaderAllocator(); |
444 | if (pLoaderAllocator->IsCollectible()) |
445 | { |
446 | if (pLoaderAllocator->Marked()) |
447 | { |
448 | pLoaderAllocator->ClearMark(); |
449 | } |
450 | else if (!pLoaderAllocator->IsAlive()) |
451 | { |
                        // Check that this LoaderAllocator is not already in the list to destroy
                        // (in case multiple assemblies are loaded in the same LoaderAllocator)
454 | bool addAllocator = true; |
455 | LoaderAllocator * pCheckAllocatorToDestroy = pFirstDestroyedLoaderAllocator; |
456 | while (pCheckAllocatorToDestroy != NULL) |
457 | { |
458 | if (pCheckAllocatorToDestroy == pLoaderAllocator) |
459 | { |
460 | addAllocator = false; |
461 | break; |
462 | } |
463 | |
464 | pCheckAllocatorToDestroy = pCheckAllocatorToDestroy->m_pLoaderAllocatorDestroyNext; |
465 | } |
466 | |
                        // If it was not found, add this LoaderAllocator to the list
468 | if (addAllocator) |
469 | { |
470 | pLoaderAllocator->m_pLoaderAllocatorDestroyNext = pFirstDestroyedLoaderAllocator; |
                            // Store this LoaderAllocator as the list head; it is used later in this function
472 | pFirstDestroyedLoaderAllocator = pLoaderAllocator; |
473 | _ASSERTE(pLoaderAllocator->m_pFirstDomainAssemblyFromSameALCToDelete != NULL); |
474 | } |
475 | } |
476 | } |
477 | } |
478 | } |
479 | } |
480 | |
481 | // Iterate through free list, removing from Assembly list |
482 | LoaderAllocator * pDomainLoaderAllocatorDestroyIterator = pFirstDestroyedLoaderAllocator; |
483 | |
484 | while (pDomainLoaderAllocatorDestroyIterator != NULL) |
485 | { |
486 | _ASSERTE(!pDomainLoaderAllocatorDestroyIterator->IsAlive()); |
487 | |
488 | DomainAssemblyIterator domainAssemblyIt(pDomainLoaderAllocatorDestroyIterator->m_pFirstDomainAssemblyFromSameALCToDelete); |
489 | |
490 | // Release all assemblies from the same ALC |
491 | while (!domainAssemblyIt.end()) |
492 | { |
493 | DomainAssembly* domainAssemblyToRemove = domainAssemblyIt; |
494 | pAppDomain->RemoveAssembly(domainAssemblyToRemove); |
495 | |
496 | if (!domainAssemblyToRemove->GetAssembly()->IsDynamic()) |
497 | { |
498 | pAppDomain->RemoveFileFromCache(domainAssemblyToRemove->GetFile()); |
499 | AssemblySpec spec; |
500 | spec.InitializeSpec(domainAssemblyToRemove->GetFile()); |
501 | VERIFY(pAppDomain->RemoveAssemblyFromCache(domainAssemblyToRemove)); |
502 | pAppDomain->RemoveNativeImageDependency(&spec); |
503 | } |
504 | |
505 | domainAssemblyIt++; |
506 | } |
507 | |
508 | pDomainLoaderAllocatorDestroyIterator = pDomainLoaderAllocatorDestroyIterator->m_pLoaderAllocatorDestroyNext; |
509 | } |
510 | |
511 | return pFirstDestroyedLoaderAllocator; |
512 | } // LoaderAllocator::GCLoaderAllocators_RemoveAssemblies |
513 | |
514 | //--------------------------------------------------------------------------------------- |
515 | // |
516 | // Collect unreferenced assemblies, delete all their remaining resources. |
517 | // |
518 | //static |
519 | void LoaderAllocator::GCLoaderAllocators(LoaderAllocator* pOriginalLoaderAllocator) |
520 | { |
521 | CONTRACTL |
522 | { |
523 | THROWS; |
524 | GC_TRIGGERS; |
525 | MODE_PREEMPTIVE; |
526 | SO_INTOLERANT; |
527 | } |
528 | CONTRACTL_END; |
529 | |
530 | // List of LoaderAllocators being deleted |
531 | LoaderAllocator * pFirstDestroyedLoaderAllocator = NULL; |
532 | |
533 | AppDomain* pAppDomain = (AppDomain*)pOriginalLoaderAllocator->GetDomain(); |
534 | |
    // Collect all LoaderAllocators that no longer have any live DomainAssemblies.
    // Note that this may not collect pOriginalLoaderAllocator itself, in case that
    // LoaderAllocator hasn't loaded any DomainAssembly; that case is handled below.
    // Note: The removed LoaderAllocators are no longer reachable outside of this function,
    // because we removed them from the assembly list
540 | pFirstDestroyedLoaderAllocator = GCLoaderAllocators_RemoveAssemblies(pAppDomain); |
541 | |
542 | bool isOriginalLoaderAllocatorFound = false; |
543 | |
544 | // Iterate through free list, firing ETW events and notifying the debugger |
545 | LoaderAllocator * pDomainLoaderAllocatorDestroyIterator = pFirstDestroyedLoaderAllocator; |
546 | while (pDomainLoaderAllocatorDestroyIterator != NULL) |
547 | { |
548 | _ASSERTE(!pDomainLoaderAllocatorDestroyIterator->IsAlive()); |
549 | // Fire ETW event |
550 | ETW::LoaderLog::CollectibleLoaderAllocatorUnload((AssemblyLoaderAllocator *)pDomainLoaderAllocatorDestroyIterator); |
551 | |
552 | // Set the unloaded flag before notifying the debugger |
553 | pDomainLoaderAllocatorDestroyIterator->SetIsUnloaded(); |
554 | |
555 | DomainAssemblyIterator domainAssemblyIt(pDomainLoaderAllocatorDestroyIterator->m_pFirstDomainAssemblyFromSameALCToDelete); |
556 | while (!domainAssemblyIt.end()) |
557 | { |
558 | // Notify the debugger |
559 | domainAssemblyIt->NotifyDebuggerUnload(); |
560 | domainAssemblyIt++; |
561 | } |
562 | |
563 | if (pDomainLoaderAllocatorDestroyIterator == pOriginalLoaderAllocator) |
564 | { |
565 | isOriginalLoaderAllocatorFound = true; |
566 | } |
567 | pDomainLoaderAllocatorDestroyIterator = pDomainLoaderAllocatorDestroyIterator->m_pLoaderAllocatorDestroyNext; |
568 | } |
569 | |
    // If the original LoaderAllocator was not processed, it is most likely a LoaderAllocator
    // without any loaded DomainAssembly. We still want to collect it, so add it to the list
    // of LoaderAllocators to destroy
572 | if (!isOriginalLoaderAllocatorFound && !pOriginalLoaderAllocator->IsAlive()) |
573 | { |
574 | pOriginalLoaderAllocator->m_pLoaderAllocatorDestroyNext = pFirstDestroyedLoaderAllocator; |
575 | pFirstDestroyedLoaderAllocator = pOriginalLoaderAllocator; |
576 | } |
577 | |
578 | // Iterate through free list, deleting DomainAssemblies |
579 | pDomainLoaderAllocatorDestroyIterator = pFirstDestroyedLoaderAllocator; |
580 | while (pDomainLoaderAllocatorDestroyIterator != NULL) |
581 | { |
582 | _ASSERTE(!pDomainLoaderAllocatorDestroyIterator->IsAlive()); |
583 | |
584 | DomainAssemblyIterator domainAssemblyIt(pDomainLoaderAllocatorDestroyIterator->m_pFirstDomainAssemblyFromSameALCToDelete); |
585 | while (!domainAssemblyIt.end()) |
586 | { |
587 | delete (DomainAssembly*)domainAssemblyIt; |
588 | domainAssemblyIt++; |
589 | } |
        // We don't strictly have to set this to NULL as the assembly is not reachable anymore, but just in case ...
        // (It also makes debugging much easier: an accidental use hits a NULL AV instead of a dangling pointer)
592 | pDomainLoaderAllocatorDestroyIterator->m_pFirstDomainAssemblyFromSameALCToDelete = NULL; |
593 | |
594 | pDomainLoaderAllocatorDestroyIterator->ReleaseManagedAssemblyLoadContext(); |
595 | |
        // The following code previously ran as part of delete ~DomainAssembly -> Terminate.
        // It was moved here in order to make sure that we can unload a LoaderAllocator
        // that never had a DomainAssembly
        // (a LoaderAllocator now owns 0..n DomainAssemblies)
600 | |
        // This cleanup code starts resembling parts of AppDomain::Terminate too much.
        // It would be useful to reduce duplication and also establish clear responsibilities
603 | // for LoaderAllocator::Destroy, Assembly::Terminate, LoaderAllocator::Terminate |
604 | // and LoaderAllocator::~LoaderAllocator. We need to establish how these |
605 | // cleanup paths interact with app-domain unload and process tear-down, too. |
606 | |
607 | if (!IsAtProcessExit()) |
608 | { |
609 | // Suspend the EE to do some clean up that can only occur |
610 | // while no threads are running. |
611 | GCX_COOP(); // SuspendEE may require current thread to be in Coop mode |
612 | // SuspendEE cares about the reason flag only when invoked for a GC |
613 | // Other values are typically ignored. If using SUSPEND_FOR_APPDOMAIN_SHUTDOWN |
614 | // is inappropriate, we can introduce a new flag or hijack an unused one. |
615 | ThreadSuspend::SuspendEE(ThreadSuspend::SUSPEND_FOR_APPDOMAIN_SHUTDOWN); |
616 | } |
617 | |
618 | ExecutionManager::Unload(pDomainLoaderAllocatorDestroyIterator); |
619 | pDomainLoaderAllocatorDestroyIterator->UninitVirtualCallStubManager(); |
620 | |
621 | // TODO: Do we really want to perform this on each LoaderAllocator? |
622 | MethodTable::ClearMethodDataCache(); |
623 | ClearJitGenericHandleCache(pAppDomain); |
624 | |
625 | if (!IsAtProcessExit()) |
626 | { |
627 | // Resume the EE. |
628 | ThreadSuspend::RestartEE(FALSE, TRUE); |
629 | } |
630 | |
        // Because RegisterLoaderAllocatorForDeletion modifies m_pLoaderAllocatorDestroyNext, save it here first
632 | LoaderAllocator* pLoaderAllocatorDestroyNext = pDomainLoaderAllocatorDestroyIterator->m_pLoaderAllocatorDestroyNext; |
633 | |
634 | // Register this LoaderAllocator for cleanup |
635 | pAppDomain->RegisterLoaderAllocatorForDeletion(pDomainLoaderAllocatorDestroyIterator); |
636 | |
637 | // Go to next |
638 | pDomainLoaderAllocatorDestroyIterator = pLoaderAllocatorDestroyNext; |
639 | } |
640 | |
    // Deleting the DomainAssemblies will have created a list of LoaderAllocators on the AppDomain.
    // Call this shutdown function to clean those up.
643 | pAppDomain->ShutdownFreeLoaderAllocators(TRUE); |
644 | } // LoaderAllocator::GCLoaderAllocators |
645 | |
646 | //--------------------------------------------------------------------------------------- |
647 | // |
648 | //static |
649 | BOOL QCALLTYPE LoaderAllocator::Destroy(QCall::LoaderAllocatorHandle pLoaderAllocator) |
650 | { |
651 | QCALL_CONTRACT; |
652 | |
653 | BOOL ret = FALSE; |
654 | |
655 | BEGIN_QCALL; |
656 | |
657 | if (ObjectHandleIsNull(pLoaderAllocator->GetLoaderAllocatorObjectHandle())) |
658 | { |
659 | STRESS_LOG1(LF_CLASSLOADER, LL_INFO100, "Begin LoaderAllocator::Destroy for loader allocator %p\n" , reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(pLoaderAllocator))); |
660 | LoaderAllocatorID *pID = pLoaderAllocator->Id(); |
661 | |
662 | // This will probably change for shared code unloading |
663 | _ASSERTE(pID->GetType() == LAT_Assembly); |
664 | |
665 | #ifdef FEATURE_COMINTEROP |
666 | if (pLoaderAllocator->m_pComCallWrapperCache) |
667 | { |
668 | pLoaderAllocator->m_pComCallWrapperCache->Release(); |
669 | |
670 | // if the above released the wrapper cache, then it will call back and reset our |
671 | // m_pComCallWrapperCache to null. |
672 | if (!pLoaderAllocator->m_pComCallWrapperCache) |
673 | { |
674 | LOG((LF_CLASSLOADER, LL_INFO10, "LoaderAllocator::Destroy ComCallWrapperCache released\n" )); |
675 | } |
676 | #ifdef _DEBUG |
677 | else |
678 | { |
679 | pLoaderAllocator->m_pComCallWrapperCache = NULL; |
680 | LOG((LF_CLASSLOADER, LL_INFO10, "LoaderAllocator::Destroy ComCallWrapperCache not released\n" )); |
681 | } |
682 | #endif // _DEBUG |
683 | } |
684 | #endif // FEATURE_COMINTEROP |
685 | |
686 | DomainAssembly* pDomainAssembly = (DomainAssembly*)(pID->GetDomainAssemblyIterator()); |
687 | if (pDomainAssembly != NULL) |
688 | { |
689 | Assembly *pAssembly = pDomainAssembly->GetCurrentAssembly(); |
690 | |
            // If not fully loaded, it is still domain-specific, so just get the domain from the DomainAssembly
692 | BaseDomain *pDomain = pAssembly ? pAssembly->Parent() : pDomainAssembly->GetAppDomain(); |
693 | |
694 | // This will probably change for shared code unloading |
695 | _ASSERTE(pDomain->IsAppDomain()); |
696 | |
697 | AppDomain *pAppDomain = pDomain->AsAppDomain(); |
698 | pLoaderAllocator->m_pFirstDomainAssemblyFromSameALCToDelete = pAssembly->GetDomainAssembly(pAppDomain); |
699 | } |
700 | |
701 | // Iterate through all references to other loader allocators and decrement their reference |
702 | // count |
703 | LoaderAllocatorSet::Iterator iter = pLoaderAllocator->m_LoaderAllocatorReferences.Begin(); |
704 | while (iter != pLoaderAllocator->m_LoaderAllocatorReferences.End()) |
705 | { |
706 | LoaderAllocator *pAllocator = *iter; |
707 | pAllocator->Release(); |
708 | iter++; |
709 | } |
710 | |
711 | // Release this loader allocator |
712 | BOOL fIsLastReferenceReleased = pLoaderAllocator->Release(); |
713 | |
        // If the reference count on this loader allocator dropped to 0, then a LoaderAllocator may
        // be collectible; thus, perform a garbage collection.
        // The reference count is set up such that in the case of non-trivial graphs, the reference count
        // may hit zero early.
718 | if (fIsLastReferenceReleased) |
719 | { |
720 | LoaderAllocator::GCLoaderAllocators(pLoaderAllocator); |
721 | } |
722 | STRESS_LOG1(LF_CLASSLOADER, LL_INFO100, "End LoaderAllocator::Destroy for loader allocator %p\n" , reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(pLoaderAllocator))); |
723 | |
724 | ret = TRUE; |
725 | } |
726 | |
727 | END_QCALL; |
728 | |
729 | return ret; |
730 | } // LoaderAllocator::Destroy |
731 | |
732 | #define MAX_LOADERALLOCATOR_HANDLE 0x40000000 |
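
// LOADERHANDLE encoding: for collectible allocators, a handle is an index into the
// managed handle table stored as ((index + 1) << 1), so the low bit is clear. For
// non-collectible allocators, a handle is a direct OBJECTREF pointer plus 1; pointer
// alignment guarantees the low bit is set. FreeHandle and SetHandleValue use the low
// bit to distinguish the two forms. The cap above presumably keeps the shifted index
// representable on 32-bit platforms.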
733 | |
734 | // Returns NULL if the managed LoaderAllocator object was already collected. |
735 | LOADERHANDLE LoaderAllocator::AllocateHandle(OBJECTREF value) |
736 | { |
737 | CONTRACTL |
738 | { |
739 | THROWS; |
740 | GC_TRIGGERS; |
741 | MODE_COOPERATIVE; |
742 | } |
743 | CONTRACTL_END; |
744 | |
745 | LOADERHANDLE retVal; |
746 | |
747 | struct _gc |
748 | { |
749 | OBJECTREF value; |
750 | LOADERALLOCATORREF loaderAllocator; |
751 | PTRARRAYREF handleTable; |
752 | PTRARRAYREF handleTableOld; |
753 | } gc; |
754 | |
755 | ZeroMemory(&gc, sizeof(gc)); |
756 | |
757 | GCPROTECT_BEGIN(gc); |
758 | |
759 | gc.value = value; |
760 | |
761 | // The handle table is read locklessly, be careful |
762 | if (IsCollectible()) |
763 | { |
764 | gc.loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle); |
765 | if (gc.loaderAllocator == NULL) |
766 | { // The managed LoaderAllocator is already collected, we cannot allocate any exposed managed objects for it |
767 | retVal = NULL; |
768 | } |
769 | else |
770 | { |
771 | DWORD slotsUsed; |
772 | DWORD numComponents; |
773 | |
774 | do |
775 | { |
776 | { |
777 | CrstHolder ch(&m_crstLoaderAllocator); |
778 | |
779 | gc.handleTable = gc.loaderAllocator->GetHandleTable(); |
780 | |
781 | if (!m_freeHandleIndexesStack.IsEmpty()) |
782 | { |
783 | // Reuse a handle slot that was previously freed |
784 | DWORD freeHandleIndex = m_freeHandleIndexesStack.Pop(); |
785 | gc.handleTable->SetAt(freeHandleIndex, gc.value); |
786 | retVal = (UINT_PTR)((freeHandleIndex + 1) << 1); |
787 | break; |
788 | } |
789 | |
790 | slotsUsed = gc.loaderAllocator->GetSlotsUsed(); |
791 | |
792 | if (slotsUsed > MAX_LOADERALLOCATOR_HANDLE) |
793 | { |
794 | COMPlusThrowOM(); |
795 | } |
796 | |
797 | numComponents = gc.handleTable->GetNumComponents(); |
798 | |
799 | if (slotsUsed < numComponents) |
800 | { |
801 | // The handle table is large enough, allocate next slot from it |
802 | gc.handleTable->SetAt(slotsUsed, gc.value); |
803 | gc.loaderAllocator->SetSlotsUsed(slotsUsed + 1); |
804 | retVal = (UINT_PTR)((slotsUsed + 1) << 1); |
805 | break; |
806 | } |
807 | } |
808 | |
809 | // We need to enlarge the handle table |
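                // Note: the new array is allocated while the lock is released, since the
                // allocation may trigger a GC; the lock is retaken below to publish it.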
810 | gc.handleTableOld = gc.handleTable; |
811 | |
812 | DWORD newSize = numComponents * 2; |
813 | gc.handleTable = (PTRARRAYREF)AllocateObjectArray(newSize, g_pObjectClass); |
814 | |
815 | { |
816 | CrstHolder ch(&m_crstLoaderAllocator); |
817 | |
818 | if (gc.loaderAllocator->GetHandleTable() == gc.handleTableOld) |
819 | { |
820 | /* Copy out of old array */ |
821 | memmoveGCRefs(gc.handleTable->GetDataPtr(), gc.handleTableOld->GetDataPtr(), slotsUsed * sizeof(Object *)); |
822 | gc.loaderAllocator->SetHandleTable(gc.handleTable); |
823 | } |
824 | else |
825 | { |
                        // Another thread beat us to enlarging the handle array; use the handle table it allocated
827 | gc.handleTable = gc.loaderAllocator->GetHandleTable(); |
828 | } |
829 | |
830 | slotsUsed = gc.loaderAllocator->GetSlotsUsed(); |
831 | numComponents = gc.handleTable->GetNumComponents(); |
832 | |
833 | if (slotsUsed < numComponents) |
834 | { |
835 | // The handle table is large enough, allocate next slot from it |
836 | gc.handleTable->SetAt(slotsUsed, gc.value); |
837 | gc.loaderAllocator->SetSlotsUsed(slotsUsed + 1); |
838 | retVal = (UINT_PTR)((slotsUsed + 1) << 1); |
839 | break; |
840 | } |
841 | } |
842 | |
                // Loop in the unlikely case that another thread beat us to enlarging the handle
                // array, but all of its slots were used up before the current thread was scheduled.
845 | } |
846 | while (true); |
847 | } |
848 | } |
849 | else |
850 | { |
851 | OBJECTREF* pRef = GetDomain()->AllocateObjRefPtrsInLargeTable(1); |
852 | SetObjectReference(pRef, gc.value, GetDomain()->AsAppDomain()); |
853 | retVal = (((UINT_PTR)pRef) + 1); |
854 | } |
855 | |
856 | GCPROTECT_END(); |
857 | |
858 | return retVal; |
859 | } |
860 | |
861 | OBJECTREF LoaderAllocator::GetHandleValue(LOADERHANDLE handle) |
862 | { |
863 | CONTRACTL |
864 | { |
865 | NOTHROW; |
866 | GC_NOTRIGGER; |
867 | MODE_COOPERATIVE; |
868 | SO_TOLERANT; |
869 | } |
870 | CONTRACTL_END; |
871 | |
872 | OBJECTREF objRet = NULL; |
873 | GET_LOADERHANDLE_VALUE_FAST(this, handle, &objRet); |
874 | return objRet; |
875 | } |
876 | |
877 | void LoaderAllocator::FreeHandle(LOADERHANDLE handle) |
878 | { |
879 | CONTRACTL |
880 | { |
881 | NOTHROW; |
882 | GC_NOTRIGGER; |
883 | MODE_ANY; |
884 | PRECONDITION(handle != NULL); |
885 | } |
886 | CONTRACTL_END; |
887 | |
888 | SetHandleValue(handle, NULL); |
889 | |
890 | if ((((UINT_PTR)handle) & 1) == 0) |
891 | { |
892 | // The slot value doesn't have the low bit set, so it is an index to the handle table. |
893 | // In this case, push the index of the handle to the stack of freed indexes for |
894 | // reuse. |
895 | CrstHolder ch(&m_crstLoaderAllocator); |
896 | |
897 | UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1; |
898 | // The Push can fail due to OOM. Ignore this failure, it is better than crashing. The |
899 | // only effect is that the slot will not be reused in the future if the runtime survives |
900 | // the low memory situation. |
901 | m_freeHandleIndexesStack.Push((DWORD)index); |
902 | } |
903 | } |
904 | |
905 | OBJECTREF LoaderAllocator::CompareExchangeValueInHandle(LOADERHANDLE handle, OBJECTREF valueUNSAFE, OBJECTREF compareUNSAFE) |
906 | { |
907 | CONTRACTL |
908 | { |
909 | THROWS; |
910 | GC_TRIGGERS; |
911 | MODE_COOPERATIVE; |
912 | PRECONDITION(handle != NULL); |
913 | } |
914 | CONTRACTL_END; |
915 | |
916 | OBJECTREF retVal; |
917 | |
918 | struct _gc |
919 | { |
920 | OBJECTREF value; |
921 | OBJECTREF compare; |
922 | OBJECTREF previous; |
923 | } gc; |
924 | |
925 | ZeroMemory(&gc, sizeof(gc)); |
926 | GCPROTECT_BEGIN(gc); |
927 | |
928 | gc.value = valueUNSAFE; |
929 | gc.compare = compareUNSAFE; |
930 | |
931 | if ((((UINT_PTR)handle) & 1) != 0) |
932 | { |
933 | OBJECTREF *ptr = (OBJECTREF *)(((UINT_PTR)handle) - 1); |
934 | gc.previous = *ptr; |
935 | if ((*ptr) == gc.compare) |
936 | { |
937 | SetObjectReference(ptr, gc.value, GetDomain()->AsAppDomain()); |
938 | } |
939 | } |
940 | else |
941 | { |
942 | /* The handle table is read locklessly, be careful */ |
943 | CrstHolder ch(&m_crstLoaderAllocator); |
944 | |
945 | _ASSERTE(!ObjectHandleIsNull(m_hLoaderAllocatorObjectHandle)); |
946 | |
947 | UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1; |
948 | LOADERALLOCATORREF loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle); |
949 | PTRARRAYREF handleTable = loaderAllocator->GetHandleTable(); |
950 | |
951 | gc.previous = handleTable->GetAt(index); |
952 | if (gc.previous == gc.compare) |
953 | { |
954 | handleTable->SetAt(index, gc.value); |
955 | } |
956 | } |
957 | |
958 | retVal = gc.previous; |
959 | GCPROTECT_END(); |
960 | |
961 | return retVal; |
962 | } |
963 | |
964 | void LoaderAllocator::SetHandleValue(LOADERHANDLE handle, OBJECTREF value) |
965 | { |
966 | CONTRACTL |
967 | { |
968 | NOTHROW; |
969 | GC_NOTRIGGER; |
970 | MODE_ANY; |
971 | PRECONDITION(handle != NULL); |
972 | } |
973 | CONTRACTL_END; |
974 | |
975 | GCX_COOP(); |
976 | |
977 | GCPROTECT_BEGIN(value); |
978 | |
    // If the handle value has the low bit set, then it is a direct pointer to the value.
    // Otherwise, it is an index into the handle table, and updating it requires the lock.
981 | if ((((UINT_PTR)handle) & 1) != 0) |
982 | { |
983 | OBJECTREF *ptr = (OBJECTREF *)(((UINT_PTR)handle) - 1); |
984 | SetObjectReference(ptr, value, GetDomain()->AsAppDomain()); |
985 | } |
986 | else |
987 | { |
988 | // The handle table is read locklessly, be careful |
989 | CrstHolder ch(&m_crstLoaderAllocator); |
990 | |
991 | _ASSERTE(!ObjectHandleIsNull(m_hLoaderAllocatorObjectHandle)); |
992 | |
993 | UINT_PTR index = (((UINT_PTR)handle) >> 1) - 1; |
994 | LOADERALLOCATORREF loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle); |
995 | PTRARRAYREF handleTable = loaderAllocator->GetHandleTable(); |
996 | handleTable->SetAt(index, value); |
997 | } |
998 | |
999 | GCPROTECT_END(); |
1000 | |
1001 | return; |
1002 | } |
1003 | |
1004 | void LoaderAllocator::SetupManagedTracking(LOADERALLOCATORREF * pKeepLoaderAllocatorAlive) |
1005 | { |
1006 | STANDARD_VM_CONTRACT; |
1007 | |
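    // Report memory pressure so the GC accounts for the native memory this allocator
    // holds; Terminate removes the same amount of pressure during cleanup.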
1008 | GCInterface::AddMemoryPressure(30000); |
1009 | m_fGCPressure = true; |
1010 | |
1011 | GCX_COOP(); |
1012 | |
1013 | // |
1014 | // Initialize managed loader allocator reference holder |
1015 | // |
1016 | |
1017 | MethodTable *pMT = MscorlibBinder::GetClass(CLASS__LOADERALLOCATOR); |
1018 | |
1019 | *pKeepLoaderAllocatorAlive = (LOADERALLOCATORREF)AllocateObject(pMT); |
1020 | |
1021 | MethodDescCallSite initLoaderAllocator(METHOD__LOADERALLOCATOR__CTOR, (OBJECTREF *)pKeepLoaderAllocatorAlive); |
1022 | |
1023 | ARG_SLOT args[] = { |
1024 | ObjToArgSlot(*pKeepLoaderAllocatorAlive) |
1025 | }; |
1026 | |
1027 | initLoaderAllocator.Call(args); |
1028 | |
1029 | m_hLoaderAllocatorObjectHandle = GetDomain()->CreateLongWeakHandle(*pKeepLoaderAllocatorAlive); |
1030 | |
1031 | RegisterHandleForCleanup(m_hLoaderAllocatorObjectHandle); |
1032 | } |
1033 | |
1034 | void LoaderAllocator::ActivateManagedTracking() |
1035 | { |
1036 | CONTRACTL |
1037 | { |
1038 | NOTHROW; |
1039 | GC_TRIGGERS; |
1040 | FORBID_FAULT; |
1041 | MODE_ANY; |
1042 | } |
1043 | CONTRACTL_END |
1044 | |
1045 | GCX_COOP(); |
1046 | |
1047 | // There is now one external reference to this LoaderAllocator (the managed scout) |
1048 | _ASSERTE(m_cReferences == (UINT32)-1); |
1049 | m_cReferences = (UINT32)1; |
1050 | |
1051 | LOADERALLOCATORREF loaderAllocator = (LOADERALLOCATORREF)ObjectFromHandle(m_hLoaderAllocatorObjectHandle); |
1052 | loaderAllocator->SetNativeLoaderAllocator(this); |
1053 | } |
1054 | #endif // !CROSSGEN_COMPILE |
1055 | |
1056 | |
1057 | // We don't actually allocate a low frequency heap for collectible types. |
1058 | // This is carefully tuned to sum up to 16 pages to reduce waste. |
1059 | #define COLLECTIBLE_LOW_FREQUENCY_HEAP_SIZE (0 * GetOsPageSize()) |
1060 | #define COLLECTIBLE_HIGH_FREQUENCY_HEAP_SIZE (3 * GetOsPageSize()) |
1061 | #define COLLECTIBLE_STUB_HEAP_SIZE GetOsPageSize() |
1062 | #define COLLECTIBLE_CODEHEAP_SIZE (7 * GetOsPageSize()) |
1063 | #define COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE (5 * GetOsPageSize()) |
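// (0 low-frequency + 3 high-frequency + 1 stub + 7 code + 5 VSD = 16 pages total.)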
1064 | |
1065 | void LoaderAllocator::Init(BaseDomain *pDomain, BYTE *pExecutableHeapMemory) |
1066 | { |
1067 | STANDARD_VM_CONTRACT; |
1068 | |
1069 | m_pDomain = pDomain; |
1070 | |
1071 | m_crstLoaderAllocator.Init(CrstLoaderAllocator, (CrstFlags)CRST_UNSAFE_COOPGC); |
1072 | #ifdef FEATURE_COMINTEROP |
1073 | m_InteropDataCrst.Init(CrstInteropData, CRST_REENTRANCY); |
1074 | m_ComCallWrapperCrst.Init(CrstCOMCallWrapper); |
1075 | #endif |
1076 | |
1077 | // |
1078 | // Initialize the heaps |
1079 | // |
1080 | |
1081 | DWORD dwLowFrequencyHeapReserveSize; |
1082 | DWORD dwHighFrequencyHeapReserveSize; |
1083 | DWORD dwStubHeapReserveSize; |
1084 | DWORD dwExecutableHeapReserveSize; |
1085 | DWORD dwCodeHeapReserveSize; |
1086 | DWORD dwVSDHeapReserveSize; |
1087 | |
1088 | dwExecutableHeapReserveSize = 0; |
1089 | |
1090 | if (IsCollectible()) |
1091 | { |
1092 | dwLowFrequencyHeapReserveSize = COLLECTIBLE_LOW_FREQUENCY_HEAP_SIZE; |
1093 | dwHighFrequencyHeapReserveSize = COLLECTIBLE_HIGH_FREQUENCY_HEAP_SIZE; |
1094 | dwStubHeapReserveSize = COLLECTIBLE_STUB_HEAP_SIZE; |
1095 | dwCodeHeapReserveSize = COLLECTIBLE_CODEHEAP_SIZE; |
1096 | dwVSDHeapReserveSize = COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE; |
1097 | } |
1098 | else |
1099 | { |
1100 | dwLowFrequencyHeapReserveSize = LOW_FREQUENCY_HEAP_RESERVE_SIZE; |
1101 | dwHighFrequencyHeapReserveSize = HIGH_FREQUENCY_HEAP_RESERVE_SIZE; |
1102 | dwStubHeapReserveSize = STUB_HEAP_RESERVE_SIZE; |
1103 | |
1104 | // Non-collectible assemblies do not reserve space for these heaps. |
1105 | dwCodeHeapReserveSize = 0; |
1106 | dwVSDHeapReserveSize = 0; |
1107 | } |
1108 | |
1109 | // The global heap needs a bit of space for executable memory that is not associated with a rangelist. |
1110 | // Take a page from the high-frequency heap for this. |
1111 | if (pExecutableHeapMemory != NULL) |
1112 | { |
1113 | #ifdef FEATURE_WINDOWSPHONE |
        // code:UMEntryThunk::CreateUMEntryThunk allocates memory on the executable loader heap on phone.
        // Reserve enough for a typical phone app to fit.
1116 | dwExecutableHeapReserveSize = 3 * GetOsPageSize(); |
1117 | #else |
1118 | dwExecutableHeapReserveSize = GetOsPageSize(); |
1119 | #endif |
1120 | |
1121 | _ASSERTE(dwExecutableHeapReserveSize < dwHighFrequencyHeapReserveSize); |
1122 | dwHighFrequencyHeapReserveSize -= dwExecutableHeapReserveSize; |
1123 | } |
1124 | |
1125 | DWORD dwTotalReserveMemSize = dwLowFrequencyHeapReserveSize |
1126 | + dwHighFrequencyHeapReserveSize |
1127 | + dwStubHeapReserveSize |
1128 | + dwCodeHeapReserveSize |
1129 | + dwVSDHeapReserveSize |
1130 | + dwExecutableHeapReserveSize; |
1131 | |
1132 | dwTotalReserveMemSize = (DWORD) ALIGN_UP(dwTotalReserveMemSize, VIRTUAL_ALLOC_RESERVE_GRANULARITY); |
1133 | |
1134 | #if !defined(_WIN64) |
1135 | // Make sure that we reserve as little as possible on 32-bit to save address space |
1136 | _ASSERTE(dwTotalReserveMemSize <= VIRTUAL_ALLOC_RESERVE_GRANULARITY); |
1137 | #endif |
1138 | |
1139 | BYTE * initReservedMem = ClrVirtualAllocExecutable(dwTotalReserveMemSize, MEM_RESERVE, PAGE_NOACCESS); |
1140 | |
1141 | m_InitialReservedMemForLoaderHeaps = initReservedMem; |
1142 | |
1143 | if (initReservedMem == NULL) |
1144 | COMPlusThrowOM(); |
1145 | |
1146 | if (IsCollectible()) |
1147 | { |
1148 | m_pCodeHeapInitialAlloc = initReservedMem; |
1149 | initReservedMem += dwCodeHeapReserveSize; |
1150 | m_pVSDHeapInitialAlloc = initReservedMem; |
1151 | initReservedMem += dwVSDHeapReserveSize; |
1152 | } |
1153 | else |
1154 | { |
1155 | _ASSERTE((dwCodeHeapReserveSize == 0) && (m_pCodeHeapInitialAlloc == NULL)); |
1156 | _ASSERTE((dwVSDHeapReserveSize == 0) && (m_pVSDHeapInitialAlloc == NULL)); |
1157 | } |
1158 | |
1159 | if (dwLowFrequencyHeapReserveSize != 0) |
1160 | { |
1161 | _ASSERTE(!IsCollectible()); |
1162 | |
1163 | m_pLowFrequencyHeap = new (&m_LowFreqHeapInstance) LoaderHeap(LOW_FREQUENCY_HEAP_RESERVE_SIZE, |
1164 | LOW_FREQUENCY_HEAP_COMMIT_SIZE, |
1165 | initReservedMem, |
1166 | dwLowFrequencyHeapReserveSize, |
1167 | LOADERHEAP_PROFILE_COUNTER); |
1168 | initReservedMem += dwLowFrequencyHeapReserveSize; |
1169 | } |
1170 | |
1171 | if (dwExecutableHeapReserveSize != 0) |
1172 | { |
1173 | _ASSERTE(!IsCollectible()); |
1174 | |
1175 | m_pExecutableHeap = new (pExecutableHeapMemory) LoaderHeap(STUB_HEAP_RESERVE_SIZE, |
1176 | STUB_HEAP_COMMIT_SIZE, |
1177 | initReservedMem, |
1178 | dwExecutableHeapReserveSize, |
1179 | LOADERHEAP_PROFILE_COUNTER, |
1180 | NULL, |
1181 | TRUE /* Make heap executable */ |
1182 | ); |
1183 | initReservedMem += dwExecutableHeapReserveSize; |
1184 | } |
1185 | |
1186 | m_pHighFrequencyHeap = new (&m_HighFreqHeapInstance) LoaderHeap(HIGH_FREQUENCY_HEAP_RESERVE_SIZE, |
1187 | HIGH_FREQUENCY_HEAP_COMMIT_SIZE, |
1188 | initReservedMem, |
1189 | dwHighFrequencyHeapReserveSize, |
1190 | LOADERHEAP_PROFILE_COUNTER); |
1191 | initReservedMem += dwHighFrequencyHeapReserveSize; |
1192 | |
1193 | if (IsCollectible()) |
1194 | m_pLowFrequencyHeap = m_pHighFrequencyHeap; |
1195 | |
1196 | #if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) |
1197 | m_pHighFrequencyHeap->m_fPermitStubsWithUnwindInfo = TRUE; |
1198 | #endif |
1199 | |
1200 | m_pStubHeap = new (&m_StubHeapInstance) LoaderHeap(STUB_HEAP_RESERVE_SIZE, |
1201 | STUB_HEAP_COMMIT_SIZE, |
1202 | initReservedMem, |
1203 | dwStubHeapReserveSize, |
1204 | LOADERHEAP_PROFILE_COUNTER, |
1205 | STUBMANAGER_RANGELIST(StubLinkStubManager), |
1206 | TRUE /* Make heap executable */); |
1207 | |
1208 | initReservedMem += dwStubHeapReserveSize; |
1209 | |
1210 | #if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) |
1211 | m_pStubHeap->m_fPermitStubsWithUnwindInfo = TRUE; |
1212 | #endif |
1213 | |
1214 | #ifdef CROSSGEN_COMPILE |
1215 | m_pPrecodeHeap = new (&m_PrecodeHeapInstance) LoaderHeap(GetOsPageSize(), GetOsPageSize()); |
1216 | #else |
1217 | m_pPrecodeHeap = new (&m_PrecodeHeapInstance) CodeFragmentHeap(this, STUB_CODE_BLOCK_PRECODE); |
1218 | #endif |
1219 | |
1220 | // Set up the IL stub cache |
1221 | m_ILStubCache.Init(m_pHighFrequencyHeap); |
1222 | |
1223 | #ifdef FEATURE_COMINTEROP |
1224 | // Init the COM Interop data hash |
1225 | { |
1226 | LockOwner lock = { &m_InteropDataCrst, IsOwnerOfCrst }; |
1227 | m_interopDataHash.Init(0, NULL, false, &lock); |
1228 | } |
1229 | #endif // FEATURE_COMINTEROP |
1230 | } |
1231 | |
1232 | |
1233 | #ifndef CROSSGEN_COMPILE |
1234 | |
1235 | #ifdef FEATURE_READYTORUN |
1236 | PTR_CodeFragmentHeap LoaderAllocator::GetDynamicHelpersHeap() |
1237 | { |
1238 | CONTRACTL { |
1239 | THROWS; |
1240 | MODE_ANY; |
1241 | } CONTRACTL_END; |
1242 | |
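    // Lazily create the helpers heap on first use. If another thread publishes a heap
    // first, the interlocked exchange fails and our instance is deleted.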
1243 | if (m_pDynamicHelpersHeap == NULL) |
1244 | { |
1245 | CodeFragmentHeap * pDynamicHelpersHeap = new CodeFragmentHeap(this, STUB_CODE_BLOCK_DYNAMICHELPER); |
1246 | if (InterlockedCompareExchangeT(&m_pDynamicHelpersHeap, pDynamicHelpersHeap, NULL) != NULL) |
1247 | delete pDynamicHelpersHeap; |
1248 | } |
1249 | return m_pDynamicHelpersHeap; |
1250 | } |
1251 | #endif |
1252 | |
1253 | FuncPtrStubs * LoaderAllocator::GetFuncPtrStubs() |
1254 | { |
1255 | CONTRACTL { |
1256 | THROWS; |
1257 | MODE_ANY; |
1258 | } CONTRACTL_END; |
1259 | |
1260 | if (m_pFuncPtrStubs == NULL) |
1261 | { |
1262 | FuncPtrStubs * pFuncPtrStubs = new FuncPtrStubs(); |
1263 | if (InterlockedCompareExchangeT(&m_pFuncPtrStubs, pFuncPtrStubs, NULL) != NULL) |
1264 | delete pFuncPtrStubs; |
1265 | } |
1266 | return m_pFuncPtrStubs; |
1267 | } |
1268 | |
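// GetVSDHeapInitialBlock and GetCodeHeapInitialBlock below hand out their initial
// reserved block at most once: the interlocked exchange atomically swaps the member
// to NULL, so only the first caller receives the block and later callers get NULL.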
1269 | BYTE *LoaderAllocator::GetVSDHeapInitialBlock(DWORD *pSize) |
1270 | { |
1271 | LIMITED_METHOD_CONTRACT; |
1272 | |
1273 | *pSize = 0; |
1274 | BYTE *buffer = InterlockedCompareExchangeT(&m_pVSDHeapInitialAlloc, NULL, m_pVSDHeapInitialAlloc); |
1275 | if (buffer != NULL) |
1276 | { |
1277 | *pSize = COLLECTIBLE_VIRTUALSTUBDISPATCH_HEAP_SPACE; |
1278 | } |
1279 | return buffer; |
1280 | } |
1281 | |
1282 | BYTE *LoaderAllocator::GetCodeHeapInitialBlock(const BYTE * loAddr, const BYTE * hiAddr, DWORD minimumSize, DWORD *pSize) |
1283 | { |
1284 | LIMITED_METHOD_CONTRACT; |
1285 | |
1286 | *pSize = 0; |
1287 | // Check to see if the size is small enough that this might work |
1288 | if (minimumSize > COLLECTIBLE_CODEHEAP_SIZE) |
1289 | return NULL; |
1290 | |
1291 | // Check to see if initial alloc would be in the proper region |
1292 | if (loAddr != NULL || hiAddr != NULL) |
1293 | { |
1294 | if (m_pCodeHeapInitialAlloc < loAddr) |
1295 | return NULL; |
1296 | if ((m_pCodeHeapInitialAlloc + COLLECTIBLE_CODEHEAP_SIZE) > hiAddr) |
1297 | return NULL; |
1298 | } |
1299 | |
1300 | BYTE * buffer = InterlockedCompareExchangeT(&m_pCodeHeapInitialAlloc, NULL, m_pCodeHeapInitialAlloc); |
1301 | if (buffer != NULL) |
1302 | { |
1303 | *pSize = COLLECTIBLE_CODEHEAP_SIZE; |
1304 | } |
1305 | return buffer; |
1306 | } |
1307 | |
// In retail builds, this should be called from AppDomain::Terminate
1309 | void LoaderAllocator::Terminate() |
1310 | { |
1311 | CONTRACTL { |
1312 | NOTHROW; |
1313 | GC_TRIGGERS; |
1314 | MODE_ANY; |
1315 | SO_INTOLERANT; |
1316 | } CONTRACTL_END; |
1317 | |
1318 | if (m_fTerminated) |
1319 | return; |
1320 | |
1321 | m_fTerminated = true; |
1322 | |
1323 | LOG((LF_CLASSLOADER, LL_INFO100, "Begin LoaderAllocator::Terminate for loader allocator %p\n" , reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(this)))); |
1324 | |
1325 | if (m_fGCPressure) |
1326 | { |
1327 | GCX_PREEMP(); |
1328 | GCInterface::RemoveMemoryPressure(30000); |
1329 | m_fGCPressure = false; |
1330 | } |
1331 | |
1332 | delete m_pUMEntryThunkCache; |
1333 | m_pUMEntryThunkCache = NULL; |
1334 | |
1335 | m_crstLoaderAllocator.Destroy(); |
1336 | #ifdef FEATURE_COMINTEROP |
1337 | m_ComCallWrapperCrst.Destroy(); |
1338 | m_InteropDataCrst.Destroy(); |
1339 | #endif |
1340 | m_LoaderAllocatorReferences.RemoveAll(); |
1341 | |
    // In collectible types we merge the low-frequency and high-frequency heaps,
    // so don't destroy the heap twice.
1344 | if ((m_pLowFrequencyHeap != NULL) && (m_pLowFrequencyHeap != m_pHighFrequencyHeap)) |
1345 | { |
1346 | m_pLowFrequencyHeap->~LoaderHeap(); |
1347 | m_pLowFrequencyHeap = NULL; |
1348 | } |
1349 | |
1350 | if (m_pHighFrequencyHeap != NULL) |
1351 | { |
1352 | #ifdef STUBLINKER_GENERATES_UNWIND_INFO |
1353 | UnregisterUnwindInfoInLoaderHeap(m_pHighFrequencyHeap); |
1354 | #endif |
1355 | |
1356 | m_pHighFrequencyHeap->~LoaderHeap(); |
1357 | m_pHighFrequencyHeap = NULL; |
1358 | } |
1359 | |
1360 | if (m_pStubHeap != NULL) |
1361 | { |
1362 | #ifdef STUBLINKER_GENERATES_UNWIND_INFO |
1363 | UnregisterUnwindInfoInLoaderHeap(m_pStubHeap); |
1364 | #endif |
1365 | |
1366 | m_pStubHeap->~LoaderHeap(); |
1367 | m_pStubHeap = NULL; |
1368 | } |
1369 | |
1370 | if (m_pPrecodeHeap != NULL) |
1371 | { |
1372 | m_pPrecodeHeap->~CodeFragmentHeap(); |
1373 | m_pPrecodeHeap = NULL; |
1374 | } |
1375 | |
1376 | #ifdef FEATURE_READYTORUN |
1377 | if (m_pDynamicHelpersHeap != NULL) |
1378 | { |
1379 | delete m_pDynamicHelpersHeap; |
1380 | m_pDynamicHelpersHeap = NULL; |
1381 | } |
1382 | #endif |
1383 | |
1384 | if (m_pFuncPtrStubs != NULL) |
1385 | { |
1386 | delete m_pFuncPtrStubs; |
1387 | m_pFuncPtrStubs = NULL; |
1388 | } |
1389 | |
    // This was the block reserved by BaseDomain::Init for the loader heaps.
    if (m_InitialReservedMemForLoaderHeaps)
    {
        ClrVirtualFree(m_InitialReservedMemForLoaderHeaps, 0, MEM_RELEASE);
        m_InitialReservedMemForLoaderHeaps = NULL;
    }
1396 | |
1397 | #ifdef FAT_DISPATCH_TOKENS |
1398 | if (m_pFatTokenSetLock != NULL) |
1399 | { |
1400 | delete m_pFatTokenSetLock; |
1401 | m_pFatTokenSetLock = NULL; |
1402 | } |
1403 | |
1404 | if (m_pFatTokenSet != NULL) |
1405 | { |
1406 | delete m_pFatTokenSet; |
1407 | m_pFatTokenSet = NULL; |
1408 | } |
1409 | #endif // FAT_DISPATCH_TOKENS |
1410 | |
1411 | CleanupStringLiteralMap(); |
1412 | |
1413 | LOG((LF_CLASSLOADER, LL_INFO100, "End LoaderAllocator::Terminate for loader allocator %p\n" , reinterpret_cast<void *>(static_cast<PTR_LoaderAllocator>(this)))); |
1414 | } |
1415 | |
1416 | #endif // !CROSSGEN_COMPILE |
1417 | |
1418 | |
1419 | #else //DACCESS_COMPILE |
1420 | void LoaderAllocator::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) |
1421 | { |
1422 | SUPPORTS_DAC; |
1423 | DAC_ENUM_DTHIS(); |
1424 | if (m_pLowFrequencyHeap.IsValid()) |
1425 | { |
1426 | m_pLowFrequencyHeap->EnumMemoryRegions(flags); |
1427 | } |
1428 | if (m_pHighFrequencyHeap.IsValid()) |
1429 | { |
1430 | m_pHighFrequencyHeap->EnumMemoryRegions(flags); |
1431 | } |
1432 | if (m_pStubHeap.IsValid()) |
1433 | { |
1434 | m_pStubHeap->EnumMemoryRegions(flags); |
1435 | } |
1436 | if (m_pPrecodeHeap.IsValid()) |
1437 | { |
1438 | m_pPrecodeHeap->EnumMemoryRegions(flags); |
1439 | } |
1444 | } |
1445 | #endif //DACCESS_COMPILE |
1446 | |
1447 | SIZE_T LoaderAllocator::EstimateSize() |
1448 | { |
1449 | WRAPPER_NO_CONTRACT; |
    SIZE_T retval = 0;
    if (m_pHighFrequencyHeap)
        retval += m_pHighFrequencyHeap->GetSize();
    if (m_pLowFrequencyHeap)
        retval += m_pLowFrequencyHeap->GetSize();
    if (m_pStubHeap)
        retval += m_pStubHeap->GetSize();
    if (m_pStringLiteralMap)
        retval += m_pStringLiteralMap->GetSize();
#ifndef CROSSGEN_COMPILE
    if (m_pVirtualCallStubManager)
        retval += m_pVirtualCallStubManager->GetSize();
#endif
1463 | |
1464 | return retval; |
1465 | } |
1466 | |
1467 | #ifndef DACCESS_COMPILE |
1468 | |
1469 | #ifndef CROSSGEN_COMPILE |
1470 | |
1471 | DispatchToken LoaderAllocator::GetDispatchToken( |
1472 | UINT32 typeId, UINT32 slotNumber) |
1473 | { |
1474 | CONTRACTL { |
1475 | THROWS; |
1476 | GC_TRIGGERS; |
1477 | MODE_ANY; |
1478 | INJECT_FAULT(COMPlusThrowOM();); |
1479 | } CONTRACTL_END; |
1480 | |
1481 | #ifdef FAT_DISPATCH_TOKENS |
1482 | |
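    // Fat dispatch tokens cover the overflow case where typeId/slotNumber cannot be
    // packed into the regular token encoding. They are allocated on the high-frequency
    // heap and cached in a lazily created per-allocator set, so each <typeId, slotNumber>
    // pair maps to a single DispatchTokenFat instance.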
1483 | if (DispatchToken::RequiresDispatchTokenFat(typeId, slotNumber)) |
1484 | { |
1485 | // |
1486 | // Lock and set are lazily created. |
1487 | // |
1488 | if (m_pFatTokenSetLock == NULL) |
1489 | { |
1490 | NewHolder<SimpleRWLock> pFatTokenSetLock = new SimpleRWLock(COOPERATIVE_OR_PREEMPTIVE, LOCK_TYPE_DEFAULT); |
1491 | SimpleWriteLockHolder lock(pFatTokenSetLock); |
1492 | NewHolder<FatTokenSet> pFatTokenSet = new FatTokenSet; |
1493 | |
1494 | if (FastInterlockCompareExchangePointer( |
1495 | &m_pFatTokenSetLock, pFatTokenSetLock.GetValue(), NULL) != NULL) |
1496 | { // Someone beat us to it |
1497 | lock.Release(); |
1498 | // NewHolder will delete lock. |
1499 | } |
1500 | else |
1501 | { // Make sure second allocation succeeds before suppressing holder of first. |
1502 | pFatTokenSetLock.SuppressRelease(); |
1503 | m_pFatTokenSet = pFatTokenSet; |
1504 | pFatTokenSet.SuppressRelease(); |
1505 | } |
1506 | } |
1507 | |
1508 | // |
1509 | // Take read lock, see if the requisite token has already been created and if so use it. |
1510 | // Otherwise, take write lock and create new token and add to the set. |
1511 | // |
1512 | |
1513 | // Lookup |
1514 | SimpleReadLockHolder rlock(m_pFatTokenSetLock); |
1515 | DispatchTokenFat key(typeId, slotNumber); |
1516 | DispatchTokenFat *pFat = m_pFatTokenSet->Lookup(&key); |
1517 | if (pFat != NULL) |
1518 | { // <typeId,slotNumber> is already in the set. |
1519 | return DispatchToken(pFat); |
1520 | } |
1521 | else |
1522 | { // Create |
1523 | rlock.Release(); |
1524 | SimpleWriteLockHolder wlock(m_pFatTokenSetLock); |
1525 | |
1526 | // Check to see if someone beat us to the punch between |
1527 | // releasing the read lock and taking the write lock. |
1528 | pFat = m_pFatTokenSet->Lookup(&key); |
1529 | |
1530 | if (pFat == NULL) |
1531 | { // No one beat us; allocate and insert a new DispatchTokenFat instance. |
1532 | pFat = new ((LPVOID)GetHighFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(DispatchTokenFat)))) |
1533 | DispatchTokenFat(typeId, slotNumber); |
1534 | |
1535 | m_pFatTokenSet->Add(pFat); |
1536 | } |
1537 | |
1538 | return DispatchToken(pFat); |
1539 | } |
1540 | } |
1541 | #endif // FAT_DISPATCH_TOKENS |
1542 | |
1543 | return DispatchToken::CreateDispatchToken(typeId, slotNumber); |
1544 | } |
1545 | |
1546 | DispatchToken LoaderAllocator::TryLookupDispatchToken(UINT32 typeId, UINT32 slotNumber) |
1547 | { |
1548 | CONTRACTL { |
1549 | NOTHROW; |
1550 | GC_NOTRIGGER; |
1551 | MODE_ANY; |
1552 | SO_TOLERANT; |
1553 | } CONTRACTL_END; |
1554 | |
1555 | #ifdef FAT_DISPATCH_TOKENS |
1556 | |
1557 | if (DispatchToken::RequiresDispatchTokenFat(typeId, slotNumber)) |
1558 | { |
1559 | if (m_pFatTokenSetLock != NULL) |
1560 | { |
1561 | DispatchTokenFat * pFat = NULL; |
            // Stack probes and locking operations can throw. Catch all
            // exceptions and just return an invalid token, since this is a NOTHROW method.
1564 | EX_TRY |
1565 | { |
1566 | BEGIN_SO_INTOLERANT_CODE(GetThread()); |
1567 | SimpleReadLockHolder rlock(m_pFatTokenSetLock); |
1568 | if (m_pFatTokenSet != NULL) |
1569 | { |
1570 | DispatchTokenFat key(typeId, slotNumber); |
1571 | pFat = m_pFatTokenSet->Lookup(&key); |
1572 | } |
1573 | END_SO_INTOLERANT_CODE; |
1574 | } |
1575 | EX_CATCH |
1576 | { |
1577 | pFat = NULL; |
1578 | } |
1579 | EX_END_CATCH(SwallowAllExceptions); |
1580 | |
1581 | if (pFat != NULL) |
1582 | { |
1583 | return DispatchToken(pFat); |
1584 | } |
1585 | } |
1586 | // Return invalid token when not found. |
1587 | return DispatchToken(); |
1588 | } |
1589 | else |
1590 | #endif // FAT_DISPATCH_TOKENS |
1591 | { |
1592 | return DispatchToken::CreateDispatchToken(typeId, slotNumber); |
1593 | } |
1594 | } |
1595 | |
1596 | void LoaderAllocator::InitVirtualCallStubManager(BaseDomain * pDomain) |
1597 | { |
1598 | STANDARD_VM_CONTRACT; |
1599 | |
1600 | NewHolder<VirtualCallStubManager> pMgr(new VirtualCallStubManager()); |
1601 | |
1602 | // Init the manager, including all heaps and such. |
1603 | pMgr->Init(pDomain, this); |
1604 | |
1605 | m_pVirtualCallStubManager = pMgr; |
1606 | |
1607 | // Successfully created the manager. |
1608 | pMgr.SuppressRelease(); |
1609 | } |
1610 | |
1611 | void LoaderAllocator::UninitVirtualCallStubManager() |
1612 | { |
1613 | WRAPPER_NO_CONTRACT; |
1614 | |
1615 | if (m_pVirtualCallStubManager != NULL) |
1616 | { |
1617 | m_pVirtualCallStubManager->Uninit(); |
1618 | delete m_pVirtualCallStubManager; |
1619 | m_pVirtualCallStubManager = NULL; |
1620 | } |
1621 | } |
1622 | #endif // !CROSSGEN_COMPILE |
1623 | |
1624 | #endif // !DACCESS_COMPILE |
1625 | |
1626 | BOOL GlobalLoaderAllocator::CanUnload() |
1627 | { |
1628 | LIMITED_METHOD_CONTRACT; |
1629 | |
1630 | return FALSE; |
1631 | } |
1632 | |
1633 | BOOL AssemblyLoaderAllocator::CanUnload() |
1634 | { |
1635 | LIMITED_METHOD_CONTRACT; |
1636 | |
1637 | return TRUE; |
1638 | } |
1639 | |
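// DomainAssemblyIterator walks the singly linked list of DomainAssemblies belonging to
// the same AssemblyLoadContext. The next link is fetched eagerly, so the current
// assembly may be deleted while iterating (see GCLoaderAllocators).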
1640 | DomainAssemblyIterator::DomainAssemblyIterator(DomainAssembly* pFirstAssembly) |
1641 | { |
1642 | pCurrentAssembly = pFirstAssembly; |
1643 | pNextAssembly = pCurrentAssembly ? pCurrentAssembly->GetNextDomainAssemblyInSameALC() : NULL; |
1644 | } |
1645 | |
1646 | void DomainAssemblyIterator::operator++() |
1647 | { |
1648 | pCurrentAssembly = pNextAssembly; |
1649 | pNextAssembly = pCurrentAssembly ? pCurrentAssembly->GetNextDomainAssemblyInSameALC() : NULL; |
1650 | } |
1651 | |
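//---------------------------------------------------------------------------------------
// Marks this loader allocator as collectible and gives it a dedicated
// ShuffleThunkCache carved out of its own stub heap.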
1652 | void AssemblyLoaderAllocator::SetCollectible() |
1653 | { |
1654 | CONTRACTL |
1655 | { |
1656 | THROWS; |
1657 | } |
1658 | CONTRACTL_END; |
1659 | |
1660 | m_IsCollectible = true; |
1661 | #ifndef DACCESS_COMPILE |
1662 | m_pShuffleThunkCache = new ShuffleThunkCache(m_pStubHeap); |
1663 | #endif |
1664 | } |
1665 | |
1666 | #ifndef DACCESS_COMPILE |
1667 | |
1668 | #ifndef CROSSGEN_COMPILE |
1669 | |
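//---------------------------------------------------------------------------------------
// Releases the registered binder (this is expected to drop its last reference)
// and frees the shuffle thunk cache.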
1670 | AssemblyLoaderAllocator::~AssemblyLoaderAllocator() |
1671 | { |
1672 | if (m_binderToRelease != NULL) |
1673 | { |
1674 | VERIFY(m_binderToRelease->Release() == 0); |
1675 | m_binderToRelease = NULL; |
1676 | } |
1677 | |
1678 | delete m_pShuffleThunkCache; |
1679 | m_pShuffleThunkCache = NULL; |
1680 | } |
1681 | |
1682 | void AssemblyLoaderAllocator::RegisterBinder(CLRPrivBinderAssemblyLoadContext* binderToRelease) |
1683 | { |
    // When a binder is registered, it will be released by the destructor
    // of this instance.
1686 | _ASSERTE(m_binderToRelease == NULL); |
1687 | m_binderToRelease = binderToRelease; |
1688 | } |
1689 | |
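//---------------------------------------------------------------------------------------
// Returns the object-ref slot for the given string literal, creating the string
// literal map on first use and adding the literal if it is not already present.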
1690 | STRINGREF *LoaderAllocator::GetStringObjRefPtrFromUnicodeString(EEStringData *pStringData) |
1691 | { |
1692 | CONTRACTL |
1693 | { |
1694 | GC_TRIGGERS; |
1695 | THROWS; |
1696 | MODE_COOPERATIVE; |
1697 | PRECONDITION(CheckPointer(pStringData)); |
1698 | INJECT_FAULT(COMPlusThrowOM();); |
1699 | } |
1700 | CONTRACTL_END; |
1701 | if (m_pStringLiteralMap == NULL) |
1702 | { |
1703 | LazyInitStringLiteralMap(); |
1704 | } |
1705 | _ASSERTE(m_pStringLiteralMap); |
1706 | return m_pStringLiteralMap->GetStringLiteral(pStringData, TRUE, !CanUnload()); |
1707 | } |
1708 | |
1709 | //***************************************************************************** |
1710 | void LoaderAllocator::LazyInitStringLiteralMap() |
1711 | { |
1712 | CONTRACTL |
1713 | { |
1714 | THROWS; |
1715 | GC_TRIGGERS; |
1716 | MODE_ANY; |
1717 | INJECT_FAULT(COMPlusThrowOM();); |
1718 | } |
1719 | CONTRACTL_END; |
1720 | |
1721 | NewHolder<StringLiteralMap> pStringLiteralMap(new StringLiteralMap()); |
1722 | |
1723 | pStringLiteralMap->Init(); |
1724 | |
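    // Publish the map with a compare-exchange. If this thread wins the race, the
    // holder's cleanup is suppressed; otherwise the NewHolder destructor frees
    // the redundant map and the winner's instance is used.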
1725 | if (InterlockedCompareExchangeT<StringLiteralMap *>(&m_pStringLiteralMap, pStringLiteralMap, NULL) == NULL) |
1726 | { |
1727 | pStringLiteralMap.SuppressRelease(); |
1728 | } |
1729 | } |
1730 | |
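//---------------------------------------------------------------------------------------
// Frees the string literal map, if one was ever created.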
1731 | void LoaderAllocator::CleanupStringLiteralMap() |
1732 | { |
1733 | CONTRACTL |
1734 | { |
1735 | NOTHROW; |
1736 | GC_TRIGGERS; |
1737 | MODE_ANY; |
1738 | } |
1739 | CONTRACTL_END; |
1740 | |
1741 | if (m_pStringLiteralMap) |
1742 | { |
1743 | delete m_pStringLiteralMap; |
1744 | m_pStringLiteralMap = NULL; |
1745 | } |
1746 | } |
1747 | |
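//---------------------------------------------------------------------------------------
// Checks whether an equivalent string has already been interned. Performs a pure
// lookup; the string is not added to the map when it is missing.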
1748 | STRINGREF *LoaderAllocator::IsStringInterned(STRINGREF *pString) |
1749 | { |
1750 | CONTRACTL |
1751 | { |
1752 | GC_TRIGGERS; |
1753 | THROWS; |
1754 | MODE_COOPERATIVE; |
1755 | PRECONDITION(CheckPointer(pString)); |
1756 | INJECT_FAULT(COMPlusThrowOM();); |
1757 | } |
1758 | CONTRACTL_END; |
1759 | if (m_pStringLiteralMap == NULL) |
1760 | { |
1761 | LazyInitStringLiteralMap(); |
1762 | } |
1763 | _ASSERTE(m_pStringLiteralMap); |
1764 | return m_pStringLiteralMap->GetInternedString(pString, FALSE, !CanUnload()); |
1765 | } |
1766 | |
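//---------------------------------------------------------------------------------------
// Interns the given string: returns the existing entry if an equivalent string
// is already in the map, otherwise adds this one.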
1767 | STRINGREF *LoaderAllocator::GetOrInternString(STRINGREF *pString) |
1768 | { |
1769 | CONTRACTL |
1770 | { |
1771 | GC_TRIGGERS; |
1772 | THROWS; |
1773 | MODE_COOPERATIVE; |
1774 | PRECONDITION(CheckPointer(pString)); |
1775 | INJECT_FAULT(COMPlusThrowOM();); |
1776 | } |
1777 | CONTRACTL_END; |
1778 | if (m_pStringLiteralMap == NULL) |
1779 | { |
1780 | LazyInitStringLiteralMap(); |
1781 | } |
1782 | _ASSERTE(m_pStringLiteralMap); |
1783 | return m_pStringLiteralMap->GetInternedString(pString, TRUE, !CanUnload()); |
1784 | } |
1785 | |
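//---------------------------------------------------------------------------------------
// Records an object handle to be destroyed when this loader allocator is torn
// down (see CleanupHandles). The list item is carved out of the low-frequency
// heap, so it needs no separate cleanup.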
1786 | void AssemblyLoaderAllocator::RegisterHandleForCleanup(OBJECTHANDLE objHandle) |
1787 | { |
1788 | CONTRACTL |
1789 | { |
1790 | GC_TRIGGERS; |
1791 | THROWS; |
1792 | MODE_ANY; |
1793 | CAN_TAKE_LOCK; |
1794 | PRECONDITION(CheckPointer(objHandle)); |
1795 | INJECT_FAULT(COMPlusThrowOM();); |
1796 | } |
1797 | CONTRACTL_END; |
1798 | |
1799 | void * pItem = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(HandleCleanupListItem))); |
1800 | |
1801 | // InsertTail must be protected by a lock. Just use the loader allocator lock |
1802 | CrstHolder ch(&m_crstLoaderAllocator); |
1803 | m_handleCleanupList.InsertTail(new (pItem) HandleCleanupListItem(objHandle)); |
1804 | } |
1805 | |
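//---------------------------------------------------------------------------------------
// Destroys every handle registered via RegisterHandleForCleanup.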
1806 | void AssemblyLoaderAllocator::CleanupHandles() |
1807 | { |
1808 | CONTRACTL |
1809 | { |
1810 | GC_TRIGGERS; |
1811 | NOTHROW; |
1812 | MODE_ANY; |
1813 | CAN_TAKE_LOCK; |
1814 | } |
1815 | CONTRACTL_END; |
1816 | |
1817 | _ASSERTE(GetDomain()->IsAppDomain()); |
1818 | |
    // This method doesn't take a lock around RemoveHead because it is supposed
    // to be called only from Terminate.
1821 | while (!m_handleCleanupList.IsEmpty()) |
1822 | { |
1823 | HandleCleanupListItem * pItem = m_handleCleanupList.RemoveHead(); |
1824 | DestroyTypedHandle(pItem->m_handle); |
1825 | } |
1826 | } |
1827 | |
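//---------------------------------------------------------------------------------------
// For collectible allocators, records a failed type-initialization entry so it
// can be unlinked from the domain's class-init list lock at unload time. No-op
// for non-collectible allocators.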
1828 | void LoaderAllocator::RegisterFailedTypeInitForCleanup(ListLockEntry *pListLockEntry) |
1829 | { |
1830 | CONTRACTL |
1831 | { |
1832 | GC_TRIGGERS; |
1833 | THROWS; |
1834 | MODE_ANY; |
1835 | CAN_TAKE_LOCK; |
1836 | PRECONDITION(CheckPointer(pListLockEntry)); |
1837 | INJECT_FAULT(COMPlusThrowOM();); |
1838 | } |
1839 | CONTRACTL_END; |
1840 | |
1841 | if (!IsCollectible()) |
1842 | { |
1843 | return; |
1844 | } |
1845 | |
1846 | void * pItem = GetLowFrequencyHeap()->AllocMem(S_SIZE_T(sizeof(FailedTypeInitCleanupListItem))); |
1847 | |
1848 | // InsertTail must be protected by a lock. Just use the loader allocator lock |
1849 | CrstHolder ch(&m_crstLoaderAllocator); |
1850 | m_failedTypeInitCleanupList.InsertTail(new (pItem) FailedTypeInitCleanupListItem(pListLockEntry)); |
1851 | } |
1852 | |
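//---------------------------------------------------------------------------------------
// Unlinks every entry recorded by RegisterFailedTypeInitForCleanup from the
// domain's class-init list lock.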
1853 | void LoaderAllocator::CleanupFailedTypeInit() |
1854 | { |
1855 | CONTRACTL |
1856 | { |
1857 | GC_TRIGGERS; |
1858 | THROWS; |
1859 | MODE_ANY; |
1860 | CAN_TAKE_LOCK; |
1861 | } |
1862 | CONTRACTL_END; |
1863 | |
1864 | if (!IsCollectible()) |
1865 | { |
1866 | return; |
1867 | } |
1868 | |
1869 | _ASSERTE(GetDomain()->IsAppDomain()); |
1870 | |
1871 | // This method doesn't take a lock around loader allocator state access, because |
1872 | // it's supposed to be called only during cleanup. However, the domain-level state |
1873 | // might be accessed by multiple threads. |
1874 | ListLock *pLock = GetDomain()->GetClassInitLock(); |
1875 | |
1876 | while (!m_failedTypeInitCleanupList.IsEmpty()) |
1877 | { |
1878 | FailedTypeInitCleanupListItem * pItem = m_failedTypeInitCleanupList.RemoveHead(); |
1879 | |
1880 | ListLockHolder pInitLock(pLock); |
1881 | pLock->Unlink(pItem->m_pListLockEntry); |
1882 | } |
1883 | } |
1884 | |
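//---------------------------------------------------------------------------------------
// Asks the binder to release its reference to the managed AssemblyLoadContext
// object, if a binder was registered.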
1885 | void AssemblyLoaderAllocator::ReleaseManagedAssemblyLoadContext() |
1886 | { |
1887 | CONTRACTL |
1888 | { |
1889 | THROWS; |
1890 | GC_TRIGGERS; |
1891 | MODE_ANY; |
1892 | SO_INTOLERANT; |
1893 | } |
1894 | CONTRACTL_END; |
1895 | |
1896 | if (m_binderToRelease != NULL) |
1897 | { |
1898 | // Release the managed ALC |
1899 | m_binderToRelease->ReleaseLoadContext(); |
1900 | } |
1901 | } |
1902 | |
1903 | #ifdef FEATURE_COMINTEROP |
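//---------------------------------------------------------------------------------------
// Returns the COM callable wrapper cache for this loader allocator, creating it
// on first use with double-checked locking under m_ComCallWrapperCrst.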
1904 | ComCallWrapperCache * LoaderAllocator::GetComCallWrapperCache() |
1905 | { |
1906 | CONTRACTL |
1907 | { |
1908 | THROWS; |
1909 | GC_TRIGGERS; |
1910 | MODE_ANY; |
1911 | INJECT_FAULT(COMPlusThrowOM();); |
1912 | } |
1913 | CONTRACTL_END; |
1914 | |
1915 | if (!m_pComCallWrapperCache) |
1916 | { |
1917 | CrstHolder lh(&m_ComCallWrapperCrst); |
1918 | |
1919 | if (!m_pComCallWrapperCache) |
1920 | m_pComCallWrapperCache = ComCallWrapperCache::Create(this); |
1921 | } |
1922 | _ASSERTE(m_pComCallWrapperCache); |
1923 | return m_pComCallWrapperCache; |
1924 | } |
1925 | #endif // FEATURE_COMINTEROP |
1926 | |
// Returns the cache of U->M entry thunks created in this LoaderAllocator that are
// not associated with a delegate, creating the cache on first use.
1928 | UMEntryThunkCache *LoaderAllocator::GetUMEntryThunkCache() |
1929 | { |
1930 | CONTRACTL |
1931 | { |
1932 | THROWS; |
1933 | GC_TRIGGERS; |
1934 | MODE_ANY; |
1935 | INJECT_FAULT(COMPlusThrowOM();); |
1936 | } |
1937 | CONTRACTL_END; |
1938 | |
1939 | if (!m_pUMEntryThunkCache) |
1940 | { |
1941 | UMEntryThunkCache *pUMEntryThunkCache = new UMEntryThunkCache(GetAppDomain()); |
1942 | |
1943 | if (FastInterlockCompareExchangePointer(&m_pUMEntryThunkCache, pUMEntryThunkCache, NULL) != NULL) |
1944 | { |
1945 | // some thread swooped in and set the field |
1946 | delete pUMEntryThunkCache; |
1947 | } |
1948 | } |
1949 | _ASSERTE(m_pUMEntryThunkCache); |
1950 | return m_pUMEntryThunkCache; |
1951 | } |
1952 | |
1953 | #endif // !CROSSGEN_COMPILE |
1954 | |
1955 | #ifdef FEATURE_COMINTEROP |
1956 | |
// Looks up the interop data for a method table.
// Returns the data pointer if present, NULL otherwise.
1959 | InteropMethodTableData *LoaderAllocator::LookupComInteropData(MethodTable *pMT) |
1960 | { |
1961 | // Take the lock |
1962 | CrstHolder holder(&m_InteropDataCrst); |
1963 | |
1964 | // Lookup |
1965 | InteropMethodTableData *pData = (InteropMethodTableData*)m_interopDataHash.LookupValue((UPTR)pMT, (LPVOID)NULL); |
1966 | |
1967 | // Not there... |
1968 | if (pData == (InteropMethodTableData*)INVALIDENTRY) |
1969 | return NULL; |
1970 | |
1971 | // Found it |
1972 | return pData; |
1973 | } |
1974 | |
1975 | // Returns TRUE if successfully inserted, FALSE if this would be a duplicate entry |
1976 | BOOL LoaderAllocator::InsertComInteropData(MethodTable* pMT, InteropMethodTableData *pData) |
1977 | { |
1978 | // We don't keep track of this kind of information for interfaces |
1979 | _ASSERTE(!pMT->IsInterface()); |
1980 | |
1981 | // Take the lock |
1982 | CrstHolder holder(&m_InteropDataCrst); |
1983 | |
1984 | // Check to see that it's not already in there |
1985 | InteropMethodTableData *pDupData = (InteropMethodTableData*)m_interopDataHash.LookupValue((UPTR)pMT, (LPVOID)NULL); |
1986 | if (pDupData != (InteropMethodTableData*)INVALIDENTRY) |
1987 | return FALSE; |
1988 | |
1989 | // Not in there, so insert |
1990 | m_interopDataHash.InsertValue((UPTR)pMT, (LPVOID)pData); |
1991 | |
1992 | // Success |
1993 | return TRUE; |
1994 | } |
1995 | |
1996 | #endif // FEATURE_COMINTEROP |
1997 | |
1998 | #endif // !DACCESS_COMPILE |
1999 | |