1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | /* |
6 | * Wraps handle table to implement various handle types (Strong, Weak, etc.) |
7 | */ |
11 | |
12 | #include "common.h" |
13 | |
14 | #include "gcenv.h" |
15 | |
16 | #include "gc.h" |
17 | #include "gcscan.h" |
18 | |
19 | #include "objecthandle.h" |
20 | #include "handletablepriv.h" |
21 | |
22 | #include "gchandletableimpl.h" |
23 | |
24 | HandleTableMap g_HandleTableMap; |
25 | |
26 | // Array of contexts used while scanning dependent handles for promotion. There are as many contexts as GC |
27 | // heaps and they're allocated by Ref_Initialize and initialized during each GC by GcDhInitialScan. |
28 | DhContext *g_pDependentHandleContexts; |
29 | |
30 | #ifndef DACCESS_COMPILE |
31 | |
32 | //---------------------------------------------------------------------------- |
33 | |
34 | /* |
35 | * struct VARSCANINFO |
36 | * |
37 | * used when tracing variable-strength handles. |
38 | */ |
39 | struct VARSCANINFO |
40 | { |
41 | uintptr_t lEnableMask; // mask of types to trace |
42 | HANDLESCANPROC pfnTrace; // tracing function to use |
43 | uintptr_t lp2; // second parameter |
44 | }; |
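
// Illustrative sketch (comment only, not compiled): TraceVariableHandles below packs a VARSCANINFO
// into the lp2 parameter of HndScanHandlesForGC, and VariableTraceDispatcher unpacks it to filter
// handles by their dynamic type before forwarding to the real trace callback. For example, the
// pinning scan effectively does:
//
//     uint32_t type = HNDTYPE_VARIABLE;
//     struct VARSCANINFO info = { (uintptr_t)VHT_PINNED, PinObject, uintptr_t(fn) };
//     HndScanHandlesForGC(hTable, VariableTraceDispatcher,
//                         uintptr_t(sc), (uintptr_t)&info, &type, 1,
//                         condemned, maxgen, HNDGCF_EXTRAINFO | flags);
//
// The enable mask and trace callback shown here are examples; callers supply their own.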
45 | |
46 | |
47 | //---------------------------------------------------------------------------- |
48 | |
49 | /* |
50 | * Scan callback for tracing variable-strength handles. |
51 | * |
52 | * This callback is called to trace individual objects referred to by handles |
53 | * in the variable-strength table. |
54 | */ |
55 | void CALLBACK VariableTraceDispatcher(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
56 | { |
57 | WRAPPER_NO_CONTRACT; |
58 | |
59 | // lp2 is a pointer to our VARSCANINFO |
60 | struct VARSCANINFO *pInfo = (struct VARSCANINFO *)lp2; |
61 | |
62 | // is the handle's dynamic type one we're currently scanning? |
63 | if ((*pExtraInfo & pInfo->lEnableMask) != 0) |
64 | { |
65 | // yes - call the tracing function for this handle |
66 | pInfo->pfnTrace(pObjRef, NULL, lp1, pInfo->lp2); |
67 | } |
68 | } |
69 | |
70 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
71 | /* |
72 | * Scan callback for tracing ref-counted handles. |
73 | * |
74 | * This callback is called to trace individual objects referred to by handles |
75 | * in the refcounted table. |
76 | */ |
77 | void CALLBACK PromoteRefCounted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
78 | { |
79 | WRAPPER_NO_CONTRACT; |
80 | UNREFERENCED_PARAMETER(pExtraInfo); |
81 | |
82 | // there are too many races when asynchronously scanning ref-counted handles, so we no longer support it |
83 | _ASSERTE(!((ScanContext*)lp1)->concurrent); |
84 | |
85 | LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("" , pObjRef, "causes promotion of " , *pObjRef))); |
86 | |
87 | Object *pObj = VolatileLoad((PTR_Object*)pObjRef); |
88 | |
89 | #ifdef _DEBUG |
90 | Object *pOldObj = pObj; |
91 | #endif |
92 | |
93 | if (!HndIsNullOrDestroyedHandle(pObj) && !g_theGCHeap->IsPromoted(pObj)) |
94 | { |
95 | if (GCToEEInterface::RefCountedHandleCallbacks(pObj)) |
96 | { |
97 | _ASSERTE(lp2); |
98 | promote_func* callback = (promote_func*) lp2; |
99 | callback(&pObj, (ScanContext *)lp1, 0); |
100 | } |
101 | } |
102 | |
103 | // Assert this object wasn't relocated since we are passing a temporary object's address. |
104 | _ASSERTE(pOldObj == pObj); |
105 | } |
106 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
107 | |
108 | |
109 | // Only used by profiling/ETW. |
110 | //---------------------------------------------------------------------------- |
111 | |
112 | /* |
113 | * struct DIAG_DEPSCANINFO |
114 | * |
115 | * used when tracing dependent handles for profiling/ETW. |
116 | */ |
117 | struct DIAG_DEPSCANINFO |
118 | { |
119 | HANDLESCANPROC pfnTrace; // tracing function to use |
120 | uintptr_t pfnProfilingOrETW; |
121 | }; |
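
// Illustrative sketch (comment only, not compiled): TraceDependentHandlesBySingleThread below packs
// a DIAG_DEPSCANINFO into lp2 so that TraceDependentHandle can forward each handle with a live
// primary/secondary pair to the diagnostic callback, e.g.
//
//     uint32_t type = HNDTYPE_DEPENDENT;
//     struct DIAG_DEPSCANINFO info = { ScanPointerForProfilerAndETW, (uintptr_t)fn };
//     HndScanHandlesForGC(hTable, TraceDependentHandle,
//                         lp1, (uintptr_t)&info, &type, 1,
//                         condemned, maxgen, HNDGCF_EXTRAINFO | flags);
//
// The specific trace function and profiler callback shown are examples only.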
122 | |
123 | void CALLBACK TraceDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
124 | { |
125 | WRAPPER_NO_CONTRACT; |
126 | |
127 | if (pObjRef == NULL || pExtraInfo == NULL) |
128 | return; |
129 | |
130 | // At this point, it's possible that either or both of the primary and secondary |
131 | // objects are NULL. However, if the secondary object is non-NULL, then the primary |
132 | // object should also be non-NULL. |
133 | _ASSERTE(*pExtraInfo == 0 || *pObjRef != NULL); |
134 | |
135 | struct DIAG_DEPSCANINFO *pInfo = (struct DIAG_DEPSCANINFO*)lp2; |
136 | |
137 | HANDLESCANPROC pfnTrace = pInfo->pfnTrace; |
138 | |
139 | // is the handle's secondary object non-NULL? |
140 | if ((*pObjRef != NULL) && (*pExtraInfo != 0)) |
141 | { |
142 | // yes - call the tracing function for this handle |
143 | pfnTrace(pObjRef, NULL, lp1, (uintptr_t)(pInfo->pfnProfilingOrETW)); |
144 | } |
145 | } |
146 | |
147 | void CALLBACK UpdateDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
148 | { |
149 | LIMITED_METHOD_CONTRACT; |
150 | _ASSERTE(pExtraInfo); |
151 | |
152 | Object **pPrimaryRef = (Object **)pObjRef; |
153 | Object **pSecondaryRef = (Object **)pExtraInfo; |
154 | |
155 | LOG((LF_GC|LF_ENC, LL_INFO10000, LOG_HANDLE_OBJECT("Querying for new location of " , |
156 | pPrimaryRef, "to " , *pPrimaryRef))); |
157 | LOG((LF_GC|LF_ENC, LL_INFO10000, LOG_HANDLE_OBJECT(" and " , |
158 | pSecondaryRef, "to " , *pSecondaryRef))); |
159 | |
160 | #ifdef _DEBUG |
161 | Object *pOldPrimary = *pPrimaryRef; |
162 | Object *pOldSecondary = *pSecondaryRef; |
163 | #endif |
164 | |
165 | _ASSERTE(lp2); |
166 | promote_func* callback = (promote_func*) lp2; |
167 | callback(pPrimaryRef, (ScanContext *)lp1, 0); |
168 | callback(pSecondaryRef, (ScanContext *)lp1, 0); |
169 | |
170 | #ifdef _DEBUG |
171 | if (pOldPrimary != *pPrimaryRef) |
172 | LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n" , |
173 | DBG_ADDR(pPrimaryRef), DBG_ADDR(pOldPrimary), DBG_ADDR(*pPrimaryRef))); |
174 | else |
175 | LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n" , |
176 | DBG_ADDR(pPrimaryRef), DBG_ADDR(*pPrimaryRef))); |
177 | if (pOldSecondary != *pSecondaryRef) |
178 | LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n" , |
179 | DBG_ADDR(pSecondaryRef), DBG_ADDR(pOldSecondary), DBG_ADDR(*pSecondaryRef))); |
180 | else |
181 | LOG((LF_GC|LF_ENC, LL_INFO10000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n" , |
182 | DBG_ADDR(pSecondaryRef), DBG_ADDR(*pSecondaryRef))); |
183 | #endif |
184 | } |
185 | |
186 | void CALLBACK PromoteDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
187 | { |
188 | LIMITED_METHOD_CONTRACT; |
189 | _ASSERTE(pExtraInfo); |
190 | |
191 | Object **pPrimaryRef = (Object **)pObjRef; |
192 | Object **pSecondaryRef = (Object **)pExtraInfo; |
193 | LOG((LF_GC|LF_ENC, LL_INFO1000, "Checking promotion of DependentHandle" )); |
194 | LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t" , pObjRef, "to " , *pObjRef))); |
195 | LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t" , pSecondaryRef, "to " , *pSecondaryRef))); |
196 | |
197 | ScanContext *sc = (ScanContext*)lp1; |
198 | DhContext *pDhContext = Ref_GetDependentHandleContext(sc); |
199 | |
200 | if (*pObjRef && g_theGCHeap->IsPromoted(*pPrimaryRef)) |
201 | { |
202 | if (!g_theGCHeap->IsPromoted(*pSecondaryRef)) |
203 | { |
204 | LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPromoting secondary " LOG_OBJECT_CLASS(*pSecondaryRef))); |
205 | _ASSERTE(lp2); |
206 | promote_func* callback = (promote_func*) lp2; |
207 | callback(pSecondaryRef, (ScanContext *)lp1, 0); |
208 | // need to rescan because we might have promoted an object that itself has added fields and this |
209 | // promotion might be all that is pinning that object. If we've already scanned that dependent |
210 | // handle relationship, we could lose its secondary object. |
211 | pDhContext->m_fPromoted = true; |
212 | } |
213 | } |
214 | else if (*pObjRef) |
215 | { |
216 | // If we see a non-cleared primary which hasn't been promoted, record the fact. We will only require a |
217 | // rescan if this flag has been set (if it's clear then the previous scan found only clear and |
218 | // promoted handles, so there's no chance of finding an additional handle being promoted on a |
219 | // subsequent scan). |
220 | pDhContext->m_fUnpromotedPrimaries = true; |
221 | } |
222 | } |
223 | |
224 | void CALLBACK ClearDependentHandle(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t /*lp1*/, uintptr_t /*lp2*/) |
225 | { |
226 | LIMITED_METHOD_CONTRACT; |
227 | _ASSERTE(pExtraInfo); |
228 | |
229 | Object **pPrimaryRef = (Object **)pObjRef; |
230 | Object **pSecondaryRef = (Object **)pExtraInfo; |
231 | LOG((LF_GC|LF_ENC, LL_INFO1000, "Checking referent of DependentHandle" )); |
232 | LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tPrimary:\t" , pPrimaryRef, "to " , *pPrimaryRef))); |
233 | LOG((LF_GC|LF_ENC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("\tSecondary\t" , pSecondaryRef, "to " , *pSecondaryRef))); |
234 | |
235 | if (!g_theGCHeap->IsPromoted(*pPrimaryRef)) |
236 | { |
237 | LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable " , LOG_OBJECT_CLASS(*pPrimaryRef))); |
238 | LOG((LF_GC|LF_ENC, LL_INFO1000, "\tunreachable " , LOG_OBJECT_CLASS(*pSecondaryRef))); |
239 | *pPrimaryRef = NULL; |
240 | *pSecondaryRef = NULL; |
241 | } |
242 | else |
243 | { |
244 | _ASSERTE(g_theGCHeap->IsPromoted(*pSecondaryRef)); |
245 | LOG((LF_GC|LF_ENC, LL_INFO10000, "\tPrimary is reachable " LOG_OBJECT_CLASS(*pPrimaryRef))); |
246 | LOG((LF_GC|LF_ENC, LL_INFO10000, "\tSecondary is reachable " LOG_OBJECT_CLASS(*pSecondaryRef))); |
247 | } |
248 | } |
249 | |
250 | /* |
251 | * Scan callback for pinning handles. |
252 | * |
253 | * This callback is called to pin individual objects referred to by handles in |
254 | * the pinning table. |
255 | */ |
256 | void CALLBACK PinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
257 | { |
258 | STATIC_CONTRACT_NOTHROW; |
259 | STATIC_CONTRACT_GC_NOTRIGGER; |
260 | STATIC_CONTRACT_SO_TOLERANT; |
261 | STATIC_CONTRACT_MODE_COOPERATIVE; |
262 | UNREFERENCED_PARAMETER(pExtraInfo); |
263 | |
264 | // PINNING IS BAD - DON'T DO IT IF YOU CAN AVOID IT |
265 | LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: " , pObjRef, "causes pinning of " , *pObjRef))); |
266 | |
267 | Object **pRef = (Object **)pObjRef; |
268 | _ASSERTE(lp2); |
269 | promote_func* callback = (promote_func*) lp2; |
270 | callback(pRef, (ScanContext *)lp1, GC_CALL_PINNED); |
271 | } |
272 | |
273 | void CALLBACK AsyncPinObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
274 | { |
275 | UNREFERENCED_PARAMETER(pExtraInfo); |
276 | |
277 | LOG((LF_GC, LL_WARNING, LOG_HANDLE_OBJECT_CLASS("WARNING: " , pObjRef, "causes (async) pinning of " , *pObjRef))); |
278 | |
279 | Object **pRef = (Object **)pObjRef; |
280 | _ASSERTE(lp2); |
281 | promote_func* callback = (promote_func*)lp2; |
282 | callback(pRef, (ScanContext *)lp1, 0); |
283 | Object* pPinnedObj = *pRef; |
284 | if (!HndIsNullOrDestroyedHandle(pPinnedObj)) |
285 | { |
286 | GCToEEInterface::WalkAsyncPinnedForPromotion(pPinnedObj, (ScanContext *)lp1, callback); |
287 | } |
288 | } |
289 | |
290 | |
291 | /* |
292 | * Scan callback for tracing strong handles. |
293 | * |
294 | * This callback is called to trace individual objects referred to by handles |
295 | * in the strong table. |
296 | */ |
297 | void CALLBACK PromoteObject(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
298 | { |
299 | WRAPPER_NO_CONTRACT; |
300 | UNREFERENCED_PARAMETER(pExtraInfo); |
301 | |
302 | LOG((LF_GC, LL_INFO1000, LOG_HANDLE_OBJECT_CLASS("" , pObjRef, "causes promotion of " , *pObjRef))); |
303 | |
304 | Object **ppRef = (Object **)pObjRef; |
305 | _ASSERTE(lp2); |
306 | promote_func* callback = (promote_func*) lp2; |
307 | callback(ppRef, (ScanContext *)lp1, 0); |
308 | } |
309 | |
310 | |
311 | /* |
312 | * Scan callback for disconnecting dead handles. |
313 | * |
314 | * This callback is called to check promotion of individual objects referred to by |
315 | * handles in the weak tables. |
316 | */ |
317 | void CALLBACK CheckPromoted(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
318 | { |
319 | WRAPPER_NO_CONTRACT; |
320 | UNREFERENCED_PARAMETER(pExtraInfo); |
321 | UNREFERENCED_PARAMETER(lp1); |
322 | UNREFERENCED_PARAMETER(lp2); |
323 | |
324 | LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Checking referent of Weak-" , pObjRef, "to " , *pObjRef))); |
325 | |
326 | Object **ppRef = (Object **)pObjRef; |
327 | if (!g_theGCHeap->IsPromoted(*ppRef)) |
328 | { |
329 | LOG((LF_GC, LL_INFO100, LOG_HANDLE_OBJECT_CLASS("Severing Weak-" , pObjRef, "to unreachable " , *pObjRef))); |
330 | |
331 | *ppRef = NULL; |
332 | } |
333 | else |
334 | { |
335 | LOG((LF_GC, LL_INFO1000000, "reachable " LOG_OBJECT_CLASS(*pObjRef))); |
336 | } |
337 | } |
338 | |
339 | void CALLBACK CalculateSizedRefSize(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
340 | { |
341 | LIMITED_METHOD_CONTRACT; |
342 | |
343 | _ASSERTE(pExtraInfo); |
344 | |
345 | Object **ppSizedRef = (Object **)pObjRef; |
346 | size_t* pSize = (size_t *)pExtraInfo; |
347 | LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Getting size of referent of SizedRef-" , pObjRef, "to " , *pObjRef))); |
348 | |
349 | ScanContext* sc = (ScanContext *)lp1; |
350 | promote_func* callback = (promote_func*) lp2; |
351 | |
352 | size_t sizeBegin = g_theGCHeap->GetPromotedBytes(sc->thread_number); |
353 | callback(ppSizedRef, (ScanContext *)lp1, 0); |
354 | size_t sizeEnd = g_theGCHeap->GetPromotedBytes(sc->thread_number); |
355 | *pSize = sizeEnd - sizeBegin; |
356 | } |
357 | |
358 | /* |
359 | * Scan callback for updating pointers. |
360 | * |
361 | * This callback is called to update pointers for individual objects referred to by |
362 | * handles in the weak and strong tables. |
363 | */ |
364 | void CALLBACK UpdatePointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
365 | { |
366 | LIMITED_METHOD_CONTRACT; |
367 | UNREFERENCED_PARAMETER(pExtraInfo); |
368 | |
369 | LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Querying for new location of " , pObjRef, "to " , *pObjRef))); |
370 | |
371 | Object **ppRef = (Object **)pObjRef; |
372 | |
373 | #ifdef _DEBUG |
374 | Object *pOldLocation = *ppRef; |
375 | #endif |
376 | |
377 | _ASSERTE(lp2); |
378 | promote_func* callback = (promote_func*) lp2; |
379 | callback(ppRef, (ScanContext *)lp1, 0); |
380 | |
381 | #ifdef _DEBUG |
382 | if (pOldLocation != *pObjRef) |
383 | LOG((LF_GC, LL_INFO10000, "Updating " FMT_HANDLE "from" FMT_ADDR "to " FMT_OBJECT "\n" , |
384 | DBG_ADDR(pObjRef), DBG_ADDR(pOldLocation), DBG_ADDR(*pObjRef))); |
385 | else |
386 | LOG((LF_GC, LL_INFO100000, "Updating " FMT_HANDLE "- " FMT_OBJECT "did not move\n" , |
387 | DBG_ADDR(pObjRef), DBG_ADDR(*pObjRef))); |
388 | #endif |
389 | } |
390 | |
391 | |
392 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
393 | /* |
394 | * Scan callback for reporting handle roots to the profiler and/or ETW. |
395 | * |
396 | * This callback is called for each handle root of interest and forwards the referent (and, for |
397 | * dependent handles, the secondary object) to the registered handle_scan_fn. |
398 | */ |
399 | void CALLBACK ScanPointerForProfilerAndETW(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
400 | { |
401 | CONTRACTL |
402 | { |
403 | NOTHROW; |
404 | GC_NOTRIGGER; |
405 | } |
406 | CONTRACTL_END; |
407 | UNREFERENCED_PARAMETER(pExtraInfo); |
408 | handle_scan_fn fn = (handle_scan_fn)lp2; |
409 | |
410 | LOG((LF_GC | LF_CORPROF, LL_INFO100000, LOG_HANDLE_OBJECT_CLASS("Notifying profiler of " , pObjRef, "to " , *pObjRef))); |
411 | |
412 | // Get the base object (which can subsequently be cast into an OBJECTREF == ObjectID) |
413 | Object **pRef = (Object **)pObjRef; |
414 | |
415 | // Get a hold of the heap ID that's tacked onto the end of the scancontext struct. |
416 | ScanContext *pSC = (ScanContext *)lp1; |
417 | |
418 | uint32_t rootFlags = 0; |
419 | bool isDependent = false; |
420 | |
421 | OBJECTHANDLE handle = (OBJECTHANDLE)(pRef); |
422 | switch (HandleFetchType(handle)) |
423 | { |
424 | case HNDTYPE_DEPENDENT: |
425 | isDependent = true; |
426 | break; |
427 | case HNDTYPE_WEAK_SHORT: |
428 | case HNDTYPE_WEAK_LONG: |
429 | #ifdef FEATURE_COMINTEROP |
430 | case HNDTYPE_WEAK_WINRT: |
431 | #endif // FEATURE_COMINTEROP |
432 | rootFlags |= kEtwGCRootFlagsWeakRef; |
433 | break; |
434 | |
435 | case HNDTYPE_STRONG: |
436 | case HNDTYPE_SIZEDREF: |
437 | break; |
438 | |
439 | case HNDTYPE_PINNED: |
440 | case HNDTYPE_ASYNCPINNED: |
441 | rootFlags |= kEtwGCRootFlagsPinning; |
442 | break; |
443 | |
444 | case HNDTYPE_VARIABLE: |
445 | #ifdef FEATURE_REDHAWK |
446 | { |
447 | // Set the appropriate ETW flags for the current strength of this variable handle |
448 | uint32_t nVarHandleType = GetVariableHandleType(handle); |
449 | if (((nVarHandleType & VHT_WEAK_SHORT) != 0) || |
450 | ((nVarHandleType & VHT_WEAK_LONG) != 0)) |
451 | { |
452 | rootFlags |= kEtwGCRootFlagsWeakRef; |
453 | } |
454 | if ((nVarHandleType & VHT_PINNED) != 0) |
455 | { |
456 | rootFlags |= kEtwGCRootFlagsPinning; |
457 | } |
458 | |
459 | // No special ETW flag for strong handles (VHT_STRONG) |
460 | } |
461 | #else |
462 | _ASSERTE(!"Variable handle encountered" ); |
463 | #endif |
464 | break; |
465 | |
466 | #if defined(FEATURE_COMINTEROP) && !defined(FEATURE_REDHAWK) |
467 | case HNDTYPE_REFCOUNTED: |
468 | rootFlags |= kEtwGCRootFlagsRefCounted; |
469 | if (*pRef != NULL) |
470 | { |
471 | if (!GCToEEInterface::RefCountedHandleCallbacks(*pRef)) |
472 | rootFlags |= kEtwGCRootFlagsWeakRef; |
473 | } |
474 | break; |
475 | #endif // FEATURE_COMINTEROP && !FEATURE_REDHAWK |
476 | } |
477 | |
478 | _UNCHECKED_OBJECTREF pSec = NULL; |
479 | |
480 | if (isDependent) |
481 | { |
482 | pSec = (_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle); |
483 | } |
484 | |
485 | fn(pRef, pSec, rootFlags, pSC, isDependent); |
486 | } |
487 | #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
488 | |
489 | /* |
490 | * Scan callback for updating pointers. |
491 | * |
492 | * This callback is called to update pointers for individual objects referred to by |
493 | * handles in the pinned table. |
494 | */ |
495 | void CALLBACK UpdatePointerPinned(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2) |
496 | { |
497 | LIMITED_METHOD_CONTRACT; |
498 | UNREFERENCED_PARAMETER(pExtraInfo); |
499 | |
500 | Object **ppRef = (Object **)pObjRef; |
501 | |
502 | _ASSERTE(lp2); |
503 | promote_func* callback = (promote_func*) lp2; |
504 | callback(ppRef, (ScanContext *)lp1, GC_CALL_PINNED); |
505 | |
506 | LOG((LF_GC, LL_INFO100000, LOG_HANDLE_OBJECT("Updating " , pObjRef, "to pinned " , *pObjRef))); |
507 | } |
508 | |
509 | |
510 | //---------------------------------------------------------------------------- |
511 | |
512 | // flags describing the handle types |
513 | static const uint32_t s_rgTypeFlags[] = |
514 | { |
515 | HNDF_NORMAL, // HNDTYPE_WEAK_SHORT |
516 | HNDF_NORMAL, // HNDTYPE_WEAK_LONG |
517 | HNDF_NORMAL, // HNDTYPE_STRONG |
518 | HNDF_NORMAL, // HNDTYPE_PINNED |
519 | HNDF_EXTRAINFO, // HNDTYPE_VARIABLE |
520 | HNDF_NORMAL, // HNDTYPE_REFCOUNTED |
521 | HNDF_EXTRAINFO, // HNDTYPE_DEPENDENT |
522 | HNDF_NORMAL, // HNDTYPE_ASYNCPINNED |
523 | HNDF_EXTRAINFO, // HNDTYPE_SIZEDREF |
524 | HNDF_EXTRAINFO, // HNDTYPE_WEAK_WINRT |
525 | }; |
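
// Illustrative note: the entries above line up with the HNDTYPE_* values named in the comments, so
// the table is effectively indexed by handle type; for example, the HNDTYPE_DEPENDENT entry is
// HNDF_EXTRAINFO because dependent handles keep their secondary object in the per-handle extra info.
// The table is passed to HndCreateHandleTable whenever a per-slot handle table is created below.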
526 | |
527 | int getNumberOfSlots() |
528 | { |
529 | WRAPPER_NO_CONTRACT; |
530 | |
531 | // when Ref_Initialize is called, IGCHeap::GetNumberOfHeaps() is still 0, so use #procs as a workaround; |
532 | // this is legal since even if #heaps < #procs later, we create handles by the thread's home heap |
533 | // and just have extra unused slots in HandleTableBuckets, which does not take much space |
534 | if (!IsServerHeap()) |
535 | return 1; |
536 | |
537 | return GCToOSInterface::GetCurrentProcessCpuCount(); |
538 | } |
539 | |
540 | class HandleTableBucketHolder |
541 | { |
542 | private: |
543 | HandleTableBucket* m_bucket; |
544 | int m_slots; |
545 | BOOL m_SuppressRelease; |
546 | public: |
547 | HandleTableBucketHolder(HandleTableBucket* bucket, int slots); |
548 | ~HandleTableBucketHolder(); |
549 | |
550 | void SuppressRelease() |
551 | { |
552 | m_SuppressRelease = TRUE; |
553 | } |
554 | }; |
555 | |
556 | HandleTableBucketHolder::HandleTableBucketHolder(HandleTableBucket* bucket, int slots) |
557 | :m_bucket(bucket), m_slots(slots), m_SuppressRelease(FALSE) |
558 | { |
559 | } |
560 | |
561 | HandleTableBucketHolder::~HandleTableBucketHolder() |
562 | { |
563 | if (m_SuppressRelease) |
564 | { |
565 | return; |
566 | } |
567 | if (m_bucket->pTable) |
568 | { |
569 | for (int n = 0; n < m_slots; n ++) |
570 | { |
571 | if (m_bucket->pTable[n]) |
572 | { |
573 | HndDestroyHandleTable(m_bucket->pTable[n]); |
574 | } |
575 | } |
576 | delete [] m_bucket->pTable; |
577 | } |
578 | |
579 | // we do not own m_bucket, so we shouldn't delete it here. |
580 | } |
581 | |
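/*
 * Ref_Initialize.
 *
 * One-time startup of the handle table subsystem: allocates the global bucket array, the global
 * GCHandleStore with its first HandleTableBucket (one handle table per slot, see getNumberOfSlots),
 * and the per-heap dependent handle scan contexts.
 */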
582 | bool Ref_Initialize() |
583 | { |
584 | CONTRACTL |
585 | { |
586 | NOTHROW; |
587 | WRAPPER(GC_NOTRIGGER); |
588 | INJECT_FAULT(return false); |
589 | } |
590 | CONTRACTL_END; |
591 | |
592 | // sanity |
593 | _ASSERTE(g_HandleTableMap.pBuckets == NULL); |
594 | |
595 | // Create an array of INITIAL_HANDLE_TABLE_ARRAY_SIZE HandleTableBuckets to hold the handle table sets |
596 | HandleTableBucket** pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ]; |
597 | if (pBuckets == NULL) |
598 | return false; |
599 | |
600 | ZeroMemory(pBuckets, INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *)); |
601 | |
602 | g_gcGlobalHandleStore = new (nothrow) GCHandleStore(); |
603 | if (g_gcGlobalHandleStore == NULL) |
604 | { |
605 | delete[] pBuckets; |
606 | return false; |
607 | } |
608 | |
609 | // Initialize the bucket in the global handle store |
610 | HandleTableBucket* pBucket = &g_gcGlobalHandleStore->_underlyingBucket; |
611 | |
612 | pBucket->HandleTableIndex = 0; |
613 | |
614 | int n_slots = getNumberOfSlots(); |
615 | |
616 | HandleTableBucketHolder bucketHolder(pBucket, n_slots); |
617 | |
618 | // create the handle table set for the first bucket |
619 | pBucket->pTable = new (nothrow) HHANDLETABLE[n_slots]; |
620 | if (pBucket->pTable == NULL) |
621 | goto CleanupAndFail; |
622 | |
623 | ZeroMemory(pBucket->pTable, |
624 | n_slots * sizeof(HHANDLETABLE)); |
625 | for (int uCPUindex = 0; uCPUindex < n_slots; uCPUindex++) |
626 | { |
627 | pBucket->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex(1)); |
628 | if (pBucket->pTable[uCPUindex] == NULL) |
629 | goto CleanupAndFail; |
630 | |
631 | HndSetHandleTableIndex(pBucket->pTable[uCPUindex], 0); |
632 | } |
633 | |
634 | pBuckets[0] = pBucket; |
635 | bucketHolder.SuppressRelease(); |
636 | |
637 | g_HandleTableMap.pBuckets = pBuckets; |
638 | g_HandleTableMap.dwMaxIndex = INITIAL_HANDLE_TABLE_ARRAY_SIZE; |
639 | g_HandleTableMap.pNext = NULL; |
640 | |
641 | // Allocate contexts used during dependent handle promotion scanning. There's one of these for every GC |
642 | // heap since they're scanned in parallel. |
643 | g_pDependentHandleContexts = new (nothrow) DhContext[n_slots]; |
644 | if (g_pDependentHandleContexts == NULL) |
645 | goto CleanupAndFail; |
646 | |
647 | return true; |
648 | |
649 | CleanupAndFail: |
650 | if (pBuckets != NULL) |
651 | delete[] pBuckets; |
652 | |
653 | if (g_gcGlobalHandleStore != NULL) |
654 | delete g_gcGlobalHandleStore; |
655 | |
656 | return false; |
657 | } |
658 | |
659 | void Ref_Shutdown() |
660 | { |
661 | WRAPPER_NO_CONTRACT; |
662 | |
663 | if (g_pDependentHandleContexts) |
664 | { |
665 | delete [] g_pDependentHandleContexts; |
666 | g_pDependentHandleContexts = NULL; |
667 | } |
668 | |
669 | // are there any handle tables? |
670 | if (g_HandleTableMap.pBuckets) |
671 | { |
672 | // don't destroy any of the indexed handle tables; they should |
673 | // be destroyed externally. |
674 | |
675 | // destroy the handle table bucket array |
676 | HandleTableMap *walk = &g_HandleTableMap; |
677 | while (walk) { |
678 | delete [] walk->pBuckets; |
679 | walk = walk->pNext; |
680 | } |
681 | |
682 | // null out the handle table array |
683 | g_HandleTableMap.pNext = NULL; |
684 | g_HandleTableMap.dwMaxIndex = 0; |
685 | |
686 | // null out the global table handle |
687 | g_HandleTableMap.pBuckets = NULL; |
688 | } |
689 | } |
690 | |
691 | #ifndef FEATURE_REDHAWK |
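/*
 * Ref_InitializeHandleTableBucket.
 *
 * Creates the handle table set for a new bucket and publishes it into g_HandleTableMap: the map is
 * walked looking for a free slot to claim with a compare-exchange; if every slot in every node is
 * taken, a new HandleTableMap node is appended (again with a compare-exchange, so a losing thread
 * simply frees its node and retries with the winner's node).
 */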
692 | bool Ref_InitializeHandleTableBucket(HandleTableBucket* bucket, void* context) |
693 | { |
694 | CONTRACTL |
695 | { |
696 | NOTHROW; |
697 | WRAPPER(GC_TRIGGERS); |
698 | INJECT_FAULT(return false); |
699 | } |
700 | CONTRACTL_END; |
701 | |
702 | HandleTableBucket *result = bucket; |
703 | HandleTableMap *walk = &g_HandleTableMap; |
704 | |
705 | HandleTableMap *last = NULL; |
706 | uint32_t offset = 0; |
707 | |
708 | result->pTable = NULL; |
709 | |
710 | // create handle table set for the bucket |
711 | int n_slots = getNumberOfSlots(); |
712 | |
713 | HandleTableBucketHolder bucketHolder(result, n_slots); |
714 | |
715 | result->pTable = new (nothrow) HHANDLETABLE[n_slots]; |
716 | if (!result->pTable) |
717 | { |
718 | return false; |
719 | } |
720 | |
721 | ZeroMemory(result->pTable, n_slots * sizeof(HHANDLETABLE)); |
722 | |
723 | for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++) { |
724 | result->pTable[uCPUindex] = HndCreateHandleTable(s_rgTypeFlags, _countof(s_rgTypeFlags), ADIndex((DWORD)(uintptr_t)context)); |
725 | if (!result->pTable[uCPUindex]) |
726 | return false; |
727 | } |
728 | |
729 | for (;;) { |
730 | // Do we have a free slot? |
731 | while (walk) { |
732 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) { |
733 | if (walk->pBuckets[i] == 0) { |
734 | for (int uCPUindex=0; uCPUindex < n_slots; uCPUindex++) |
735 | HndSetHandleTableIndex(result->pTable[uCPUindex], i+offset); |
736 | |
737 | result->HandleTableIndex = i+offset; |
738 | if (Interlocked::CompareExchangePointer(&walk->pBuckets[i], result, NULL) == 0) { |
739 | // Got a free slot. |
740 | bucketHolder.SuppressRelease(); |
741 | return true; |
742 | } |
743 | } |
744 | } |
745 | last = walk; |
746 | offset = walk->dwMaxIndex; |
747 | walk = walk->pNext; |
748 | } |
749 | |
750 | // No free slot. |
751 | // Let's create a new node |
752 | HandleTableMap *newMap = new (nothrow) HandleTableMap; |
753 | if (!newMap) |
754 | { |
755 | return false; |
756 | } |
757 | |
758 | newMap->pBuckets = new (nothrow) HandleTableBucket * [ INITIAL_HANDLE_TABLE_ARRAY_SIZE ]; |
759 | if (!newMap->pBuckets) |
760 | { |
761 | delete newMap; |
762 | return false; |
763 | } |
764 | |
765 | newMap->dwMaxIndex = last->dwMaxIndex + INITIAL_HANDLE_TABLE_ARRAY_SIZE; |
766 | newMap->pNext = NULL; |
767 | ZeroMemory(newMap->pBuckets, |
768 | INITIAL_HANDLE_TABLE_ARRAY_SIZE * sizeof (HandleTableBucket *)); |
769 | |
770 | if (Interlocked::CompareExchangePointer(&last->pNext, newMap, NULL) != NULL) |
771 | { |
772 | // This thread loses. |
773 | delete [] newMap->pBuckets; |
774 | delete newMap; |
775 | } |
776 | walk = last->pNext; |
777 | offset = last->dwMaxIndex; |
778 | } |
779 | } |
780 | #endif // !FEATURE_REDHAWK |
781 | |
782 | void Ref_RemoveHandleTableBucket(HandleTableBucket *pBucket) |
783 | { |
784 | LIMITED_METHOD_CONTRACT; |
785 | |
786 | size_t index = pBucket->HandleTableIndex; |
787 | HandleTableMap* walk = &g_HandleTableMap; |
788 | size_t offset = 0; |
789 | |
790 | while (walk) |
791 | { |
792 | if ((index < walk->dwMaxIndex) && (index >= offset)) |
793 | { |
794 | // During AppDomain unloading, we first remove a handle table and then destroy |
795 | // the table. As soon as the table is removed, the slot can be reused. |
796 | if (walk->pBuckets[index - offset] == pBucket) |
797 | { |
798 | walk->pBuckets[index - offset] = NULL; |
799 | return; |
800 | } |
801 | } |
802 | offset = walk->dwMaxIndex; |
803 | walk = walk->pNext; |
804 | } |
805 | |
806 | // Didn't find it. This will happen typically from Ref_DestroyHandleTableBucket if |
807 | // we explicitly call Ref_RemoveHandleTableBucket first. |
808 | } |
809 | |
810 | |
811 | void Ref_DestroyHandleTableBucket(HandleTableBucket *pBucket) |
812 | { |
813 | WRAPPER_NO_CONTRACT; |
814 | |
815 | Ref_RemoveHandleTableBucket(pBucket); |
816 | for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++) |
817 | { |
818 | HndDestroyHandleTable(pBucket->pTable[uCPUindex]); |
819 | } |
820 | delete [] pBucket->pTable; |
821 | } |
822 | |
823 | int getSlotNumber(ScanContext* sc) |
824 | { |
825 | WRAPPER_NO_CONTRACT; |
826 | |
827 | return (IsServerHeap() ? sc->thread_number : 0); |
828 | } |
829 | |
830 | // <REVISIT_TODO> reexpress as complete only like hndtable does now!!! -fmh</REVISIT_TODO> |
831 | void Ref_EndSynchronousGC(uint32_t condemned, uint32_t maxgen) |
832 | { |
833 | LIMITED_METHOD_CONTRACT; |
834 | UNREFERENCED_PARAMETER(condemned); |
835 | UNREFERENCED_PARAMETER(maxgen); |
836 | |
837 | // NOT used, must be modified for MTHTS (scalable HandleTable scan) if planned to use: |
838 | // need to pass ScanContext info to split HT bucket by threads, or to be performed under t_join::join |
839 | /* |
840 | // tell the table we finished a GC |
841 | HandleTableMap *walk = &g_HandleTableMap; |
842 | while (walk) { |
843 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) { |
844 | HHANDLETABLE hTable = walk->pTable[i]; |
845 | if (hTable) |
846 | HndNotifyGcCycleComplete(hTable, condemned, maxgen); |
847 | } |
848 | walk = walk->pNext; |
849 | } |
850 | */ |
851 | } |
852 | |
853 | void SetDependentHandleSecondary(OBJECTHANDLE handle, OBJECTREF objref) |
854 | { |
855 | CONTRACTL |
856 | { |
857 | NOTHROW; |
858 | GC_NOTRIGGER; |
859 | SO_TOLERANT; |
860 | MODE_COOPERATIVE; |
861 | } |
862 | CONTRACTL_END; |
863 | |
864 | // sanity |
865 | _ASSERTE(handle); |
866 | |
867 | #ifdef _DEBUG |
868 | // handle should not be in unloaded domain |
869 | ValidateAppDomainForHandle(handle); |
870 | |
871 | // Make sure the objref is valid before it is assigned to a handle |
872 | ValidateAssignObjrefForHandle(objref, HndGetHandleTableADIndex(HndGetHandleTable(handle))); |
873 | #endif |
874 | // unwrap the objectref we were given |
875 | _UNCHECKED_OBJECTREF value = OBJECTREF_TO_UNCHECKED_OBJECTREF(objref); |
876 | |
877 | // if we are doing a non-NULL pointer store then invoke the write-barrier |
878 | if (value) |
879 | HndWriteBarrier(handle, objref); |
880 | |
881 | // store the pointer |
882 | HndSetHandleExtraInfo(handle, HNDTYPE_DEPENDENT, (uintptr_t)value); |
883 | } |
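
// Illustrative sketch (comment only, not compiled): a dependent handle is created against the
// primary object elsewhere (through the handle table API), and its secondary is then attached here,
// e.g.
//
//     OBJECTHANDLE hDep = /* create a HNDTYPE_DEPENDENT handle on primaryObj */;
//     SetDependentHandleSecondary(hDep, secondaryObj);
//
// From then on the GC promotes secondaryObj whenever primaryObj is promoted and clears both
// referents once primaryObj dies (see the dependent handle scan callbacks above).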
884 | |
885 | |
886 | //---------------------------------------------------------------------------- |
887 | |
888 | /* |
889 | * GetVariableHandleType. |
890 | * |
891 | * Retrieves the dynamic type of a variable-strength handle. |
892 | */ |
893 | uint32_t GetVariableHandleType(OBJECTHANDLE handle) |
894 | { |
895 | WRAPPER_NO_CONTRACT; |
896 | |
897 | return (uint32_t)HndGetHandleExtraInfo(handle); |
898 | } |
899 | |
900 | /* |
901 | * UpdateVariableHandleType. |
902 | * |
903 | * Changes the dynamic type of a variable-strength handle. |
904 | * |
905 | * N.B. This routine is not a macro since we do validation in RETAIL. |
906 | * We always validate the type here because it can come from external callers. |
907 | */ |
908 | void UpdateVariableHandleType(OBJECTHANDLE handle, uint32_t type) |
909 | { |
910 | WRAPPER_NO_CONTRACT; |
911 | |
912 | // verify that we are being asked to set a valid type |
913 | if (!IS_VALID_VHT_VALUE(type)) |
914 | { |
915 | // bogus value passed in |
916 | _ASSERTE(FALSE); |
917 | return; |
918 | } |
919 | |
920 | // <REVISIT_TODO> (francish) CONCURRENT GC NOTE</REVISIT_TODO> |
921 | // |
922 | // If/when concurrent GC is implemented, we need to make sure variable handles |
923 | // DON'T change type during an asynchronous scan, OR that we properly recover |
924 | // from the change. Some changes are benign, but for example changing to or |
925 | // from a pinning handle in the middle of a scan would not be fun. |
926 | // |
927 | |
928 | // store the type in the handle's extra info |
929 | HndSetHandleExtraInfo(handle, HNDTYPE_VARIABLE, (uintptr_t)type); |
930 | } |
931 | |
932 | /* |
933 | * CompareExchangeVariableHandleType. |
934 | * |
935 | * Changes the dynamic type of a variable-strength handle. Unlike UpdateVariableHandleType we assume that the |
936 | * types have already been validated. |
937 | */ |
938 | uint32_t CompareExchangeVariableHandleType(OBJECTHANDLE handle, uint32_t oldType, uint32_t newType) |
939 | { |
940 | WRAPPER_NO_CONTRACT; |
941 | |
942 | // verify that we are being asked to get/set valid types |
943 | _ASSERTE(IS_VALID_VHT_VALUE(oldType) && IS_VALID_VHT_VALUE(newType)); |
944 | |
945 | // attempt to store the type in the handle's extra info |
946 | return (uint32_t)HndCompareExchangeHandleExtraInfo(handle, HNDTYPE_VARIABLE, (uintptr_t)oldType, (uintptr_t)newType); |
947 | } |
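
// Illustrative sketch (comment only, not compiled), assuming HndCompareExchangeHandleExtraInfo
// returns the previously stored type: a caller wanting to atomically strengthen a variable handle
// without stomping a concurrent change might write
//
//     uint32_t witnessed = CompareExchangeVariableHandleType(handle, VHT_WEAK_LONG, VHT_STRONG);
//     if (witnessed != VHT_WEAK_LONG)
//     {
//         // another thread changed the type first; decide whether to retry or give up
//     }
//
// The particular transition shown (weak-long to strong) is only an example.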
948 | |
949 | |
950 | /* |
951 | * TraceVariableHandles. |
952 | * |
953 | * Convenience function for tracing variable-strength handles. |
954 | * Wraps HndScanHandlesForGC. |
955 | */ |
956 | void TraceVariableHandles(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t uEnableMask, uint32_t condemned, uint32_t maxgen, uint32_t flags) |
957 | { |
958 | WRAPPER_NO_CONTRACT; |
959 | |
960 | // set up to scan variable handles with the specified mask and trace function |
961 | uint32_t type = HNDTYPE_VARIABLE; |
962 | struct VARSCANINFO info = { (uintptr_t)uEnableMask, pfnTrace, lp2 }; |
963 | |
964 | HandleTableMap *walk = &g_HandleTableMap; |
965 | while (walk) { |
966 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++) |
967 | if (walk->pBuckets[i] != NULL) |
968 | { |
969 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber((ScanContext*) lp1)]; |
970 | if (hTable) |
971 | { |
972 | #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING |
973 | if (g_fEnableARM) |
974 | { |
975 | ScanContext* sc = (ScanContext *)lp1; |
976 | sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable)); |
977 | } |
978 | #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING |
979 | HndScanHandlesForGC(hTable, VariableTraceDispatcher, |
980 | lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags); |
981 | } |
982 | } |
983 | walk = walk->pNext; |
984 | } |
985 | } |
986 | |
987 | /* |
988 | loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions |
989 | should be kept in sync with the code above |
990 | */ |
991 | void TraceVariableHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t uEnableMask, uint32_t condemned, uint32_t maxgen, uint32_t flags) |
992 | { |
993 | WRAPPER_NO_CONTRACT; |
994 | |
995 | // set up to scan variable handles with the specified mask and trace function |
996 | uint32_t type = HNDTYPE_VARIABLE; |
997 | struct VARSCANINFO info = { (uintptr_t)uEnableMask, pfnTrace, lp2 }; |
998 | |
999 | HandleTableMap *walk = &g_HandleTableMap; |
1000 | while (walk) { |
1001 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1002 | if (walk->pBuckets[i] != NULL) |
1003 | { |
1004 | // this is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all the handle tables in the bucket |
1005 | for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++) |
1006 | { |
1007 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1008 | if (hTable) |
1009 | HndScanHandlesForGC(hTable, VariableTraceDispatcher, |
1010 | lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags); |
1011 | } |
1012 | } |
1013 | walk = walk->pNext; |
1014 | } |
1015 | } |
1016 | |
1017 | //---------------------------------------------------------------------------- |
1018 | |
1019 | void Ref_TracePinningRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1020 | { |
1021 | WRAPPER_NO_CONTRACT; |
1022 | |
1023 | LOG((LF_GC, LL_INFO10000, "Pinning referents of pinned handles in generation %u\n" , condemned)); |
1024 | |
1025 | // pin objects pointed to by pinning handles |
1026 | uint32_t types[2] = {HNDTYPE_PINNED, HNDTYPE_ASYNCPINNED}; |
1027 | uint32_t flags = sc->concurrent ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1028 | |
1029 | HandleTableMap *walk = &g_HandleTableMap; |
1030 | while (walk) { |
1031 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1032 | if (walk->pBuckets[i] != NULL) |
1033 | { |
1034 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber((ScanContext*) sc)]; |
1035 | if (hTable) |
1036 | { |
1037 | #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1038 | if (g_fEnableARM) |
1039 | { |
1040 | sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable)); |
1041 | } |
1042 | #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1043 | |
1044 | // Pinned handles and async pinned handles are scanned in separate passes, since async pinned |
1045 | // handles may require a callback into the EE in order to fully trace an async pinned |
1046 | // object's object graph. |
1047 | HndScanHandlesForGC(hTable, PinObject, uintptr_t(sc), uintptr_t(fn), &types[0], 1, condemned, maxgen, flags); |
1048 | HndScanHandlesForGC(hTable, AsyncPinObject, uintptr_t(sc), uintptr_t(fn), &types[1], 1, condemned, maxgen, flags); |
1049 | } |
1050 | } |
1051 | walk = walk->pNext; |
1052 | } |
1053 | |
1054 | // pin objects pointed to by variable handles whose dynamic type is VHT_PINNED |
1055 | TraceVariableHandles(PinObject, uintptr_t(sc), uintptr_t(fn), VHT_PINNED, condemned, maxgen, flags); |
1056 | } |
1057 | |
1058 | |
1059 | void Ref_TraceNormalRoots(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1060 | { |
1061 | WRAPPER_NO_CONTRACT; |
1062 | |
1063 | LOG((LF_GC, LL_INFO10000, "Promoting referents of strong handles in generation %u\n" , condemned)); |
1064 | |
1065 | // promote objects pointed to by strong handles |
1066 | // during ephemeral GCs we also want to promote the ones pointed to by sizedref handles. |
1067 | uint32_t types[2] = {HNDTYPE_STRONG, HNDTYPE_SIZEDREF}; |
1068 | uint32_t uTypeCount = (((condemned >= maxgen) && !g_theGCHeap->IsConcurrentGCInProgress()) ? 1 : _countof(types)); |
1069 | uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1070 | |
1071 | HandleTableMap *walk = &g_HandleTableMap; |
1072 | while (walk) { |
1073 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1074 | if (walk->pBuckets[i] != NULL) |
1075 | { |
1076 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1077 | if (hTable) |
1078 | { |
1079 | #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1080 | if (g_fEnableARM) |
1081 | { |
1082 | sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable)); |
1083 | } |
1084 | #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1085 | |
1086 | HndScanHandlesForGC(hTable, PromoteObject, uintptr_t(sc), uintptr_t(fn), types, uTypeCount, condemned, maxgen, flags); |
1087 | } |
1088 | } |
1089 | walk = walk->pNext; |
1090 | } |
1091 | |
1092 | // promote objects pointed to by variable handles whose dynamic type is VHT_STRONG |
1093 | TraceVariableHandles(PromoteObject, uintptr_t(sc), uintptr_t(fn), VHT_STRONG, condemned, maxgen, flags); |
1094 | |
1095 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1096 | // don't scan ref-counted handles during concurrent phase as the clean-up of CCWs can race with AD unload and cause AV's |
1097 | if (!sc->concurrent) |
1098 | { |
1099 | // promote ref-counted handles |
1100 | uint32_t type = HNDTYPE_REFCOUNTED; |
1101 | |
1102 | walk = &g_HandleTableMap; |
1103 | while (walk) { |
1104 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1105 | if (walk->pBuckets[i] != NULL) |
1106 | { |
1107 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1108 | if (hTable) |
1109 | HndScanHandlesForGC(hTable, PromoteRefCounted, uintptr_t(sc), uintptr_t(fn), &type, 1, condemned, maxgen, flags ); |
1110 | } |
1111 | walk = walk->pNext; |
1112 | } |
1113 | } |
1114 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1115 | } |
1116 | |
1117 | |
1118 | void Ref_TraceRefCountHandles(HANDLESCANPROC callback, uintptr_t lParam1, uintptr_t lParam2) |
1119 | { |
1120 | #ifdef FEATURE_COMINTEROP |
1121 | int max_slots = getNumberOfSlots(); |
1122 | uint32_t handleType = HNDTYPE_REFCOUNTED; |
1123 | |
1124 | HandleTableMap *walk = &g_HandleTableMap; |
1125 | while (walk) |
1126 | { |
1127 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++) |
1128 | { |
1129 | if (walk->pBuckets[i] != NULL) |
1130 | { |
1131 | for (int j = 0; j < max_slots; j++) |
1132 | { |
1133 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[j]; |
1134 | if (hTable) |
1135 | HndEnumHandles(hTable, &handleType, 1, callback, lParam1, lParam2, false); |
1136 | } |
1137 | } |
1138 | } |
1139 | walk = walk->pNext; |
1140 | } |
1141 | #else |
1142 | UNREFERENCED_PARAMETER(callback); |
1143 | UNREFERENCED_PARAMETER(lParam1); |
1144 | UNREFERENCED_PARAMETER(lParam2); |
1145 | #endif // FEATURE_COMINTEROP |
1146 | } |
1147 | |
1148 | |
1149 | |
1150 | |
1151 | void Ref_CheckReachable(uint32_t condemned, uint32_t maxgen, uintptr_t lp1) |
1152 | { |
1153 | WRAPPER_NO_CONTRACT; |
1154 | |
1155 | LOG((LF_GC, LL_INFO10000, "Checking reachability of referents of long-weak handles in generation %u\n" , condemned)); |
1156 | |
1157 | // these are the handle types that need to be checked |
1158 | uint32_t types[] = |
1159 | { |
1160 | HNDTYPE_WEAK_LONG, |
1161 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1162 | HNDTYPE_REFCOUNTED, |
1163 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1164 | }; |
1165 | |
1166 | // check objects pointed to by short weak handles |
1167 | uint32_t flags = (((ScanContext*) lp1)->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1168 | int uCPUindex = getSlotNumber((ScanContext*) lp1); |
1169 | |
1170 | HandleTableMap *walk = &g_HandleTableMap; |
1171 | while (walk) { |
1172 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1173 | { |
1174 | if (walk->pBuckets[i] != NULL) |
1175 | { |
1176 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1177 | if (hTable) |
1178 | HndScanHandlesForGC(hTable, CheckPromoted, lp1, 0, types, _countof(types), condemned, maxgen, flags); |
1179 | } |
1180 | } |
1181 | walk = walk->pNext; |
1182 | } |
1183 | |
1184 | // check objects pointed to by variable handles whose dynamic type is VHT_WEAK_LONG |
1185 | TraceVariableHandles(CheckPromoted, lp1, 0, VHT_WEAK_LONG, condemned, maxgen, flags); |
1186 | } |
1187 | |
1188 | // |
1189 | // Dependent handles manage the relationship between primary and secondary objects, where the lifetime of |
1190 | // the secondary object is dependent upon that of the primary. The handle itself holds the primary instance, |
1191 | // while the extra handle info holds the secondary object. The secondary object should always be promoted |
1192 | // when the primary is, and the handle should be cleared if the primary is not promoted. We can't use an |
1193 | // ordinary strong handle to refer to the secondary as this could cause a cycle in the graph if the secondary |
1194 | // somehow pointed back to the primary. We can't use a weak handle because that would not keep the secondary object alive. |
1195 | // |
1196 | // The result is that a dependent handle has the EFFECT of |
1197 | //    * long weak handles to both the primary and secondary objects |
1198 | //    * a strong reference from the primary object to the secondary one |
1199 | // |
1200 | // Dependent handles are currently used for |
1201 | // |
1202 | //    * managing fields added to EnC classes, where the handle itself holds the this pointer and the |
1203 | //        secondary object represents the new field that was added. |
1204 | //    * the System.Runtime.CompilerServices.DependentHandle type exposed to managed code, which is used in |
1205 | //        the implementation of ConditionalWeakTable. |
1206 | // |
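// Illustrative pseudocode (not compiled) of the invariant the scanning callbacks above maintain for
// each dependent handle during a GC:
//
//     if (g_theGCHeap->IsPromoted(primary))
//         promote(secondary);             // PromoteDependentHandle, possibly over several passes
//     else
//         primary = secondary = NULL;     // ClearDependentHandle, once marking is complete
//
// During the relocation phase both referents are updated in place (UpdateDependentHandle).
//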
1207 | |
1208 | // Retrieve the dependent handle context associated with the current GC scan context. |
1209 | DhContext *Ref_GetDependentHandleContext(ScanContext* sc) |
1210 | { |
1211 | WRAPPER_NO_CONTRACT; |
1212 | return &g_pDependentHandleContexts[getSlotNumber(sc)]; |
1213 | } |
1214 | |
1215 | // Scan the dependent handle table promoting any secondary object whose associated primary object is promoted. |
1216 | // |
1217 | // Multiple scans may be required since (a) secondary promotions made during one scan could cause the primary |
1218 | // of another handle to be promoted and (b) the GC may not have marked all promoted objects at the time it |
1219 | // initially calls us. |
1220 | // |
1221 | // Returns true if any promotions resulted from this scan. |
1222 | bool Ref_ScanDependentHandlesForPromotion(DhContext *pDhContext) |
1223 | { |
1224 | LOG((LF_GC, LL_INFO10000, "Checking liveness of referents of dependent handles in generation %u\n" , pDhContext->m_iCondemned)); |
1225 | uint32_t type = HNDTYPE_DEPENDENT; |
1226 | uint32_t flags = (pDhContext->m_pScanContext->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1227 | flags |= HNDGCF_EXTRAINFO; |
1228 | |
1229 | // Keep a note of whether we promoted anything over the entire scan (not just the last iteration). We need |
1230 | // to return this data since under server GC promotions from this table may cause further promotions in |
1231 | // tables handled by other threads. |
1232 | bool fAnyPromotions = false; |
1233 | |
1234 | // Keep rescanning the table while both the following conditions are true: |
1235 | // 1) There's at least one primary object left that could have been promoted. |
1236 | // 2) We performed at least one secondary promotion (which could have caused a primary promotion) on the |
1237 | // last scan. |
1238 | // Note that even once we terminate the GC may call us again (because it has caused more objects to be |
1239 | // marked as promoted). But we scan in a loop here anyway because it is cheaper for us to loop than the GC |
1240 | // (especially on server GC where each external cycle has to be synchronized between GC worker threads). |
1241 | do |
1242 | { |
1243 | // Assume the conditions for re-scanning are both false initially. The scan callback below |
1244 | // (PromoteDependentHandle) will set the relevant flag on the first unpromoted primary it sees or |
1245 | // secondary promotion it performs. |
1246 | pDhContext->m_fUnpromotedPrimaries = false; |
1247 | pDhContext->m_fPromoted = false; |
1248 | |
1249 | HandleTableMap *walk = &g_HandleTableMap; |
1250 | while (walk) |
1251 | { |
1252 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1253 | { |
1254 | if (walk->pBuckets[i] != NULL) |
1255 | { |
1256 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(pDhContext->m_pScanContext)]; |
1257 | if (hTable) |
1258 | { |
1259 | HndScanHandlesForGC(hTable, |
1260 | PromoteDependentHandle, |
1261 | uintptr_t(pDhContext->m_pScanContext), |
1262 | uintptr_t(pDhContext->m_pfnPromoteFunction), |
1263 | &type, 1, |
1264 | pDhContext->m_iCondemned, |
1265 | pDhContext->m_iMaxGen, |
1266 | flags ); |
1267 | } |
1268 | } |
1269 | } |
1270 | walk = walk->pNext; |
1271 | } |
1272 | |
1273 | if (pDhContext->m_fPromoted) |
1274 | fAnyPromotions = true; |
1275 | |
1276 | } while (pDhContext->m_fUnpromotedPrimaries && pDhContext->m_fPromoted); |
1277 | |
1278 | return fAnyPromotions; |
1279 | } |
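
// Illustrative sketch (comment only, not compiled) of how a caller drives the promotion scan to a
// fixed point. Under server GC each heap thread owns one DhContext, and the outer loop has to keep
// going while any thread reports a promotion, since a secondary promoted on one heap can make a
// primary scanned by another heap reachable:
//
//     bool fAnyPromoted;
//     do
//     {
//         fAnyPromoted = Ref_ScanDependentHandlesForPromotion(pDhContext);
//         // ... join with the other GC threads and OR in their results ...
//     }
//     while (fAnyPromoted);
//
// The join step is elided; the actual driver lives in the GC itself (see the note about
// GcDhInitialScan at the top of this file).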
1280 | |
1281 | // Perform a scan of dependent handles for the purpose of clearing any that haven't had their primary |
1282 | // promoted. |
1283 | void Ref_ScanDependentHandlesForClearing(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1284 | { |
1285 | LOG((LF_GC, LL_INFO10000, "Clearing dead dependent handles in generation %u\n" , condemned)); |
1286 | uint32_t type = HNDTYPE_DEPENDENT; |
1287 | uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1288 | flags |= HNDGCF_EXTRAINFO; |
1289 | |
1290 | HandleTableMap *walk = &g_HandleTableMap; |
1291 | while (walk) |
1292 | { |
1293 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1294 | { |
1295 | if (walk->pBuckets[i] != NULL) |
1296 | { |
1297 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1298 | if (hTable) |
1299 | { |
1300 | HndScanHandlesForGC(hTable, ClearDependentHandle, uintptr_t(sc), uintptr_t(fn), &type, 1, condemned, maxgen, flags ); |
1301 | } |
1302 | } |
1303 | } |
1304 | walk = walk->pNext; |
1305 | } |
1306 | } |
1307 | |
1308 | // Perform a scan of dependent handles for the purpose of updating handles to track relocated objects. |
1309 | void Ref_ScanDependentHandlesForRelocation(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1310 | { |
1311 | LOG((LF_GC, LL_INFO10000, "Relocating moved dependent handles in generation %u\n" , condemned)); |
1312 | uint32_t type = HNDTYPE_DEPENDENT; |
1313 | uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1314 | flags |= HNDGCF_EXTRAINFO; |
1315 | |
1316 | HandleTableMap *walk = &g_HandleTableMap; |
1317 | while (walk) |
1318 | { |
1319 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1320 | { |
1321 | if (walk->pBuckets[i] != NULL) |
1322 | { |
1323 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1324 | if (hTable) |
1325 | { |
1326 | HndScanHandlesForGC(hTable, UpdateDependentHandle, uintptr_t(sc), uintptr_t(fn), &type, 1, condemned, maxgen, flags ); |
1327 | } |
1328 | } |
1329 | } |
1330 | walk = walk->pNext; |
1331 | } |
1332 | } |
1333 | |
1334 | /* |
1335 | loop scan version of TraceVariableHandles for single-thread-managed Ref_* functions |
1336 | should be kept in sync with the code above |
1337 | Only used by profiling/ETW. |
1338 | */ |
1339 | void TraceDependentHandlesBySingleThread(HANDLESCANPROC pfnTrace, uintptr_t lp1, uintptr_t lp2, uint32_t condemned, uint32_t maxgen, uint32_t flags) |
1340 | { |
1341 | WRAPPER_NO_CONTRACT; |
1342 | |
1343 | // set up to scan dependent handles with the specified trace function |
1344 | uint32_t type = HNDTYPE_DEPENDENT; |
1345 | struct DIAG_DEPSCANINFO info = { pfnTrace, lp2 }; |
1346 | |
1347 | HandleTableMap *walk = &g_HandleTableMap; |
1348 | while (walk) { |
1349 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1350 | if (walk->pBuckets[i] != NULL) |
1351 | { |
1352 | // this is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all the handle tables in the bucket |
1353 | for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++) |
1354 | { |
1355 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1356 | if (hTable) |
1357 | HndScanHandlesForGC(hTable, TraceDependentHandle, |
1358 | lp1, (uintptr_t)&info, &type, 1, condemned, maxgen, HNDGCF_EXTRAINFO | flags); |
1359 | } |
1360 | } |
1361 | walk = walk->pNext; |
1362 | } |
1363 | } |
1364 | |
1365 | // We scan handle tables by their buckets (i.e., AD index). We could get into a situation where |
1366 | // the AD indices are not very compacted (for example if we have just unloaded ADs and their |
1367 | // indices haven't been reused yet) and we could be scanning them in an unbalanced fashion. |
1368 | // Consider using an array to represent the compacted form of all AD indices that exist for the |
1369 | // sized ref handles. |
1370 | void ScanSizedRefByAD(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc, Ref_promote_func* fn, uint32_t flags) |
1371 | { |
1372 | HandleTableMap *walk = &g_HandleTableMap; |
1373 | uint32_t type = HNDTYPE_SIZEDREF; |
1374 | int uCPUindex = getSlotNumber(sc); |
1375 | int n_slots = g_theGCHeap->GetNumberOfHeaps(); |
1376 | |
1377 | while (walk) |
1378 | { |
1379 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1380 | { |
1381 | if (walk->pBuckets[i] != NULL) |
1382 | { |
1383 | ADIndex adIndex = HndGetHandleTableADIndex(walk->pBuckets[i]->pTable[0]); |
1384 | if ((adIndex.m_dwIndex % n_slots) == (uint32_t)uCPUindex) |
1385 | { |
1386 | for (int index = 0; index < n_slots; index++) |
1387 | { |
1388 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[index]; |
1389 | if (hTable) |
1390 | { |
1391 | #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1392 | if (g_fEnableARM) |
1393 | { |
1394 | sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(adIndex); |
1395 | } |
1396 | #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1397 | HndScanHandlesForGC(hTable, scanProc, uintptr_t(sc), uintptr_t(fn), &type, 1, maxgen, maxgen, flags); |
1398 | } |
1399 | } |
1400 | } |
1401 | } |
1402 | } |
1403 | walk = walk->pNext; |
1404 | } |
1405 | } |
1406 | |
1407 | void ScanSizedRefByCPU(uint32_t maxgen, HANDLESCANPROC scanProc, ScanContext* sc, Ref_promote_func* fn, uint32_t flags) |
1408 | { |
1409 | HandleTableMap *walk = &g_HandleTableMap; |
1410 | uint32_t type = HNDTYPE_SIZEDREF; |
1411 | int uCPUindex = getSlotNumber(sc); |
1412 | |
1413 | while (walk) |
1414 | { |
1415 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1416 | { |
1417 | if (walk->pBuckets[i] != NULL) |
1418 | { |
1419 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1420 | if (hTable) |
1421 | { |
1422 | #ifdef FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1423 | if (g_fEnableARM) |
1424 | { |
1425 | sc->pCurrentDomain = SystemDomain::GetAppDomainAtIndex(HndGetHandleTableADIndex(hTable)); |
1426 | } |
1427 | #endif //FEATURE_APPDOMAIN_RESOURCE_MONITORING |
1428 | |
1429 | HndScanHandlesForGC(hTable, scanProc, uintptr_t(sc), uintptr_t(fn), &type, 1, maxgen, maxgen, flags); |
1430 | } |
1431 | } |
1432 | } |
1433 | walk = walk->pNext; |
1434 | } |
1435 | } |
1436 | |
1437 | void Ref_ScanSizedRefHandles(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1438 | { |
1439 | LOG((LF_GC, LL_INFO10000, "Scanning SizedRef handles in generation %u\n" , condemned)); |
1440 | UNREFERENCED_PARAMETER(condemned); |
1441 | _ASSERTE (condemned == maxgen); |
1442 | uint32_t flags = (sc->concurrent ? HNDGCF_ASYNC : HNDGCF_NORMAL) | HNDGCF_EXTRAINFO; |
1443 | |
1444 | ScanSizedRefByCPU(maxgen, CalculateSizedRefSize, sc, fn, flags); |
1445 | } |
1446 | |
1447 | void Ref_CheckAlive(uint32_t condemned, uint32_t maxgen, uintptr_t lp1) |
1448 | { |
1449 | WRAPPER_NO_CONTRACT; |
1450 | |
1451 | LOG((LF_GC, LL_INFO10000, "Checking liveness of referents of short-weak handles in generation %u\n" , condemned)); |
1452 | |
1453 | // perform a multi-type scan that checks for unreachable objects |
1454 | uint32_t types[] = |
1455 | { |
1456 | HNDTYPE_WEAK_SHORT |
1457 | #ifdef FEATURE_COMINTEROP |
1458 | , HNDTYPE_WEAK_WINRT |
1459 | #endif // FEATURE_COMINTEROP |
1460 | }; |
1461 | uint32_t flags = (((ScanContext*) lp1)->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1462 | |
1463 | int uCPUindex = getSlotNumber((ScanContext*) lp1); |
1464 | HandleTableMap *walk = &g_HandleTableMap; |
1465 | while (walk) |
1466 | { |
1467 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1468 | { |
1469 | if (walk->pBuckets[i] != NULL) |
1470 | { |
1471 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1472 | if (hTable) |
1473 | HndScanHandlesForGC(hTable, CheckPromoted, lp1, 0, types, _countof(types), condemned, maxgen, flags); |
1474 | } |
1475 | } |
1476 | walk = walk->pNext; |
1477 | } |
1478 | // check objects pointed to by variable handles whose dynamic type is VHT_WEAK_SHORT |
1479 | TraceVariableHandles(CheckPromoted, lp1, 0, VHT_WEAK_SHORT, condemned, maxgen, flags); |
1480 | } |
1481 | |
1482 | static VOLATILE(int32_t) uCount = 0; |
1483 | |
1484 | // NOTE: if you update this function, please update the very similar profiling function immediately below. |
1485 | void Ref_UpdatePointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1486 | { |
1487 | WRAPPER_NO_CONTRACT; |
1488 | |
1489 | // For now, treat the syncblock as if it were short weak handles. <REVISIT_TODO>Later, get |
1490 | // the benefits of fast allocation / free & generational awareness by supporting |
1491 | // the SyncTable as a new block type. |
1492 | // @TODO cwb: wait for compelling performance measurements.</REVISIT_TODO> |
1493 | BOOL bDo = TRUE; |
1494 | |
1495 | if (IsServerHeap()) |
1496 | { |
1497 | bDo = (Interlocked::Increment(&uCount) == 1); |
1498 | Interlocked::CompareExchange (&uCount, 0, g_theGCHeap->GetNumberOfHeaps()); |
1499 | _ASSERTE (uCount <= g_theGCHeap->GetNumberOfHeaps()); |
1500 | } |
1501 | |
1502 | if (bDo) |
1503 | GCToEEInterface::SyncBlockCacheWeakPtrScan(&UpdatePointer, uintptr_t(sc), uintptr_t(fn)); |
1504 | |
1505 | LOG((LF_GC, LL_INFO10000, "Updating pointers to referents of non-pinning handles in generation %u\n" , condemned)); |
1506 | |
1507 | // these are the handle types that need their pointers updated |
1508 | uint32_t types[] = |
1509 | { |
1510 | HNDTYPE_WEAK_SHORT, |
1511 | HNDTYPE_WEAK_LONG, |
1512 | HNDTYPE_STRONG, |
1513 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1514 | HNDTYPE_REFCOUNTED, |
1515 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1516 | #ifdef FEATURE_COMINTEROP |
1517 | HNDTYPE_WEAK_WINRT, |
1518 | #endif // FEATURE_COMINTEROP |
1519 | HNDTYPE_SIZEDREF, |
1520 | }; |
1521 | |
1522 | // perform a multi-type scan that updates pointers |
1523 | uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1524 | |
1525 | HandleTableMap *walk = &g_HandleTableMap; |
1526 | while (walk) { |
1527 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1528 | if (walk->pBuckets[i] != NULL) |
1529 | { |
1530 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1531 | if (hTable) |
1532 | HndScanHandlesForGC(hTable, UpdatePointer, uintptr_t(sc), uintptr_t(fn), types, _countof(types), condemned, maxgen, flags); |
1533 | } |
1534 | walk = walk->pNext; |
1535 | } |
1536 | |
1537 | // update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG |
1538 | TraceVariableHandles(UpdatePointer, uintptr_t(sc), uintptr_t(fn), VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, condemned, maxgen, flags); |
1539 | } |
1540 | |
1541 | #if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
1542 | |
1543 | // Please update this if you change the Ref_UpdatePointers function above. |
1544 | void Ref_ScanHandlesForProfilerAndETW(uint32_t maxgen, uintptr_t lp1, handle_scan_fn fn) |
1545 | { |
1546 | WRAPPER_NO_CONTRACT; |
1547 | |
1548 | LOG((LF_GC | LF_CORPROF, LL_INFO10000, "Scanning all handle roots for profiler.\n" )); |
1549 | |
    // Don't scan the sync block cache because its entries should not be reported. They are weak handles only.
1551 | |
1552 | // <REVISIT_TODO>We should change the following to not report weak either |
1553 | // these are the handle types that need their pointers updated</REVISIT_TODO> |
1554 | uint32_t types[] = |
1555 | { |
1556 | HNDTYPE_WEAK_SHORT, |
1557 | HNDTYPE_WEAK_LONG, |
1558 | HNDTYPE_STRONG, |
1559 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1560 | HNDTYPE_REFCOUNTED, |
1561 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1562 | #ifdef FEATURE_COMINTEROP |
1563 | HNDTYPE_WEAK_WINRT, |
1564 | #endif // FEATURE_COMINTEROP |
1565 | HNDTYPE_PINNED, |
1566 | // HNDTYPE_VARIABLE, |
1567 | HNDTYPE_ASYNCPINNED, |
1568 | HNDTYPE_SIZEDREF, |
1569 | }; |
1570 | |
1571 | uint32_t flags = HNDGCF_NORMAL; |
1572 | |
1573 | // perform a multi-type scan that updates pointers |
1574 | HandleTableMap *walk = &g_HandleTableMap; |
1575 | while (walk) { |
1576 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1577 | if (walk->pBuckets[i] != NULL) |
                // this is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all handle tables of the bucket
1579 | for (int uCPUindex=0; uCPUindex < getNumberOfSlots(); uCPUindex++) |
1580 | { |
1581 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1582 | if (hTable) |
1583 | HndScanHandlesForGC(hTable, &ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, types, _countof(types), maxgen, maxgen, flags); |
1584 | } |
1585 | walk = walk->pNext; |
1586 | } |
1587 | |
1588 | // update pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG |
1589 | TraceVariableHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, maxgen, maxgen, flags); |
1590 | } |
1591 | |
1592 | void Ref_ScanDependentHandlesForProfilerAndETW(uint32_t maxgen, ScanContext * SC, handle_scan_fn fn) |
1593 | { |
1594 | WRAPPER_NO_CONTRACT; |
1595 | |
1596 | LOG((LF_GC | LF_CORPROF, LL_INFO10000, "Scanning dependent handles for profiler.\n" )); |
1597 | |
1598 | uint32_t flags = HNDGCF_NORMAL; |
1599 | |
1600 | uintptr_t lp1 = (uintptr_t)SC; |
1601 | TraceDependentHandlesBySingleThread(&ScanPointerForProfilerAndETW, lp1, (uintptr_t)fn, maxgen, maxgen, flags); |
1602 | } |
1603 | |
1604 | #endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE) |
1605 | |
1606 | // Callback to enumerate all object references held in handles. |
void CALLBACK ScanPointer(_UNCHECKED_OBJECTREF *pObjRef, uintptr_t *pExtraInfo, uintptr_t lp1, uintptr_t lp2)
1608 | { |
1609 | WRAPPER_NO_CONTRACT; |
1610 | UNREFERENCED_PARAMETER(pExtraInfo); |
1611 | |
1612 | Object **pRef = (Object **)pObjRef; |
1613 | _ASSERTE(lp2); |
1614 | promote_func* callback = (promote_func*)lp2; |
1615 | callback(pRef, (ScanContext *)lp1, 0); |
1616 | } |
1617 | |
1618 | // Enumerate all object references held by any of the handle tables in the system. |
1619 | void Ref_ScanPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1620 | { |
1621 | WRAPPER_NO_CONTRACT; |
1622 | |
1623 | uint32_t types[] = |
1624 | { |
1625 | HNDTYPE_WEAK_SHORT, |
1626 | HNDTYPE_WEAK_LONG, |
1627 | HNDTYPE_STRONG, |
1628 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1629 | HNDTYPE_REFCOUNTED, |
1630 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1631 | HNDTYPE_PINNED, |
1632 | HNDTYPE_ASYNCPINNED, |
1633 | HNDTYPE_SIZEDREF, |
1634 | }; |
1635 | |
1636 | uint32_t flags = HNDGCF_NORMAL; |
1637 | |
1638 | // perform a multi-type scan that enumerates pointers |
1639 | for (HandleTableMap * walk = &g_HandleTableMap; |
1640 | walk != nullptr; |
1641 | walk = walk->pNext) |
1642 | { |
1643 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i++) |
1644 | { |
1645 | if (walk->pBuckets[i] != NULL) |
1646 | { |
                // this is one of the Ref_* functions performed by a single thread in the MULTI_HEAPS case, so we need to loop through all handle tables of the bucket
1648 | for (int uCPUindex = 0; uCPUindex < getNumberOfSlots(); uCPUindex++) |
1649 | { |
1650 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1651 | if (hTable) |
1652 | HndScanHandlesForGC(hTable, &ScanPointer, uintptr_t(sc), uintptr_t(fn), types, _countof(types), condemned, maxgen, flags); |
1653 | } |
1654 | } |
1655 | } |
1656 | } |
1657 | |
1658 | // enumerate pointers in variable handles whose dynamic type is VHT_WEAK_SHORT, VHT_WEAK_LONG or VHT_STRONG |
1659 | TraceVariableHandlesBySingleThread(&ScanPointer, uintptr_t(sc), uintptr_t(fn), VHT_WEAK_SHORT | VHT_WEAK_LONG | VHT_STRONG, condemned, maxgen, flags); |
1660 | } |
1661 | |
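// Update the pointers stored in pinning and async-pinning handles, as well as in variable
// handles whose dynamic type is VHT_PINNED.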
1662 | void Ref_UpdatePinnedPointers(uint32_t condemned, uint32_t maxgen, ScanContext* sc, Ref_promote_func* fn) |
1663 | { |
1664 | WRAPPER_NO_CONTRACT; |
1665 | |
1666 | LOG((LF_GC, LL_INFO10000, "Updating pointers to referents of pinning handles in generation %u\n" , condemned)); |
1667 | |
1668 | // these are the handle types that need their pointers updated |
1669 | uint32_t types[2] = {HNDTYPE_PINNED, HNDTYPE_ASYNCPINNED}; |
1670 | uint32_t flags = (sc->concurrent) ? HNDGCF_ASYNC : HNDGCF_NORMAL; |
1671 | |
1672 | HandleTableMap *walk = &g_HandleTableMap; |
1673 | while (walk) { |
1674 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1675 | if (walk->pBuckets[i] != NULL) |
1676 | { |
1677 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1678 | if (hTable) |
1679 | HndScanHandlesForGC(hTable, UpdatePointerPinned, uintptr_t(sc), uintptr_t(fn), types, _countof(types), condemned, maxgen, flags); |
1680 | } |
1681 | walk = walk->pNext; |
1682 | } |
1683 | |
1684 | // update pointers in variable handles whose dynamic type is VHT_PINNED |
1685 | TraceVariableHandles(UpdatePointerPinned, uintptr_t(sc), uintptr_t(fn), VHT_PINNED, condemned, maxgen, flags); |
1686 | } |
1687 | |
1688 | |
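// Perform an aging scan (HNDGCF_AGE) over the handle types listed below.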
1689 | void Ref_AgeHandles(uint32_t condemned, uint32_t maxgen, uintptr_t lp1) |
1690 | { |
1691 | WRAPPER_NO_CONTRACT; |
1692 | |
1693 | LOG((LF_GC, LL_INFO10000, "Aging handles in generation %u\n" , condemned)); |
1694 | |
1695 | // these are the handle types that need their ages updated |
1696 | uint32_t types[] = |
1697 | { |
1698 | HNDTYPE_WEAK_SHORT, |
1699 | HNDTYPE_WEAK_LONG, |
1700 | |
1701 | HNDTYPE_STRONG, |
1702 | |
1703 | HNDTYPE_PINNED, |
1704 | HNDTYPE_VARIABLE, |
1705 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1706 | HNDTYPE_REFCOUNTED, |
1707 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1708 | #ifdef FEATURE_COMINTEROP |
1709 | HNDTYPE_WEAK_WINRT, |
1710 | #endif // FEATURE_COMINTEROP |
1711 | HNDTYPE_ASYNCPINNED, |
1712 | HNDTYPE_SIZEDREF, |
1713 | }; |
1714 | |
1715 | int uCPUindex = getSlotNumber((ScanContext*) lp1); |
1716 | // perform a multi-type scan that ages the handles |
1717 | HandleTableMap *walk = &g_HandleTableMap; |
1718 | while (walk) { |
1719 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1720 | if (walk->pBuckets[i] != NULL) |
1721 | { |
1722 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1723 | if (hTable) |
1724 | HndScanHandlesForGC(hTable, NULL, 0, 0, types, _countof(types), condemned, maxgen, HNDGCF_AGE); |
1725 | } |
1726 | walk = walk->pNext; |
1727 | } |
1728 | } |
1729 | |
1730 | |
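// Reset the age maps for the handle types listed below (see HndResetAgeMap).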
1731 | void Ref_RejuvenateHandles(uint32_t condemned, uint32_t maxgen, uintptr_t lp1) |
1732 | { |
1733 | WRAPPER_NO_CONTRACT; |
1734 | |
1735 | LOG((LF_GC, LL_INFO10000, "Rejuvenating handles.\n" )); |
1736 | |
1737 | // these are the handle types that need their ages updated |
1738 | uint32_t types[] = |
1739 | { |
1740 | HNDTYPE_WEAK_SHORT, |
1741 | HNDTYPE_WEAK_LONG, |
1742 | |
1743 | |
1744 | HNDTYPE_STRONG, |
1745 | |
1746 | HNDTYPE_PINNED, |
1747 | HNDTYPE_VARIABLE, |
1748 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1749 | HNDTYPE_REFCOUNTED, |
1750 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1751 | #ifdef FEATURE_COMINTEROP |
1752 | HNDTYPE_WEAK_WINRT, |
1753 | #endif // FEATURE_COMINTEROP |
1754 | HNDTYPE_ASYNCPINNED, |
1755 | HNDTYPE_SIZEDREF, |
1756 | }; |
1757 | |
1758 | int uCPUindex = getSlotNumber((ScanContext*) lp1); |
1759 | // reset the ages of these handles |
1760 | HandleTableMap *walk = &g_HandleTableMap; |
1761 | while (walk) { |
1762 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1763 | if (walk->pBuckets[i] != NULL) |
1764 | { |
1765 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[uCPUindex]; |
1766 | if (hTable) |
1767 | HndResetAgeMap(hTable, types, _countof(types), condemned, maxgen, HNDGCF_NORMAL); |
1768 | } |
1769 | walk = walk->pNext; |
1770 | } |
1771 | } |
1772 | |
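// Ask the handle table to verify the handles of the types listed below (see HndVerifyTable).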
1773 | void Ref_VerifyHandleTable(uint32_t condemned, uint32_t maxgen, ScanContext* sc) |
1774 | { |
1775 | WRAPPER_NO_CONTRACT; |
1776 | |
1777 | LOG((LF_GC, LL_INFO10000, "Verifying handles.\n" )); |
1778 | |
1779 | // these are the handle types that need to be verified |
1780 | uint32_t types[] = |
1781 | { |
1782 | HNDTYPE_WEAK_SHORT, |
1783 | HNDTYPE_WEAK_LONG, |
1784 | |
1785 | |
1786 | HNDTYPE_STRONG, |
1787 | |
1788 | HNDTYPE_PINNED, |
1789 | HNDTYPE_VARIABLE, |
1790 | #if defined(FEATURE_COMINTEROP) || defined(FEATURE_REDHAWK) |
1791 | HNDTYPE_REFCOUNTED, |
1792 | #endif // FEATURE_COMINTEROP || FEATURE_REDHAWK |
1793 | #ifdef FEATURE_COMINTEROP |
1794 | HNDTYPE_WEAK_WINRT, |
1795 | #endif // FEATURE_COMINTEROP |
1796 | HNDTYPE_ASYNCPINNED, |
1797 | HNDTYPE_SIZEDREF, |
1798 | HNDTYPE_DEPENDENT, |
1799 | }; |
1800 | |
1801 | // verify these handles |
1802 | HandleTableMap *walk = &g_HandleTableMap; |
1803 | while (walk) |
1804 | { |
1805 | for (uint32_t i = 0; i < INITIAL_HANDLE_TABLE_ARRAY_SIZE; i ++) |
1806 | { |
1807 | if (walk->pBuckets[i] != NULL) |
1808 | { |
1809 | HHANDLETABLE hTable = walk->pBuckets[i]->pTable[getSlotNumber(sc)]; |
1810 | if (hTable) |
1811 | HndVerifyTable(hTable, types, _countof(types), condemned, maxgen, HNDGCF_NORMAL); |
1812 | } |
1813 | } |
1814 | walk = walk->pNext; |
1815 | } |
1816 | } |
1817 | |
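// Return the calling thread's home heap number as reported by the GC heap.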
1818 | int GetCurrentThreadHomeHeapNumber() |
1819 | { |
1820 | WRAPPER_NO_CONTRACT; |
1821 | |
1822 | assert(g_theGCHeap != nullptr); |
1823 | return g_theGCHeap->GetHomeHeapNumber(); |
1824 | } |
1825 | |
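// Returns true if the given handle was allocated from one of this bucket's per-heap handle tables.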
1826 | bool HandleTableBucket::Contains(OBJECTHANDLE handle) |
1827 | { |
1828 | LIMITED_METHOD_CONTRACT; |
1829 | |
1830 | if (NULL == handle) |
1831 | { |
1832 | return FALSE; |
1833 | } |
1834 | |
1835 | HHANDLETABLE hTable = HndGetHandleTable(handle); |
1836 | for (int uCPUindex=0; uCPUindex < g_theGCHeap->GetNumberOfHeaps(); uCPUindex++) |
1837 | { |
1838 | if (hTable == this->pTable[uCPUindex]) |
1839 | { |
1840 | return TRUE; |
1841 | } |
1842 | } |
1843 | return FALSE; |
1844 | } |
1845 | |
1846 | #endif // !DACCESS_COMPILE |
1847 | |
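// Return the secondary object of a dependent handle; it is stored in the handle's extra info.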
1848 | GC_DAC_VISIBLE |
1849 | OBJECTREF GetDependentHandleSecondary(OBJECTHANDLE handle) |
1850 | { |
1851 | WRAPPER_NO_CONTRACT; |
1852 | |
1853 | return UNCHECKED_OBJECTREF_TO_OBJECTREF((_UNCHECKED_OBJECTREF)HndGetHandleExtraInfo(handle)); |
1854 | } |
1855 | |
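// Verify that the DAC mirror types match the runtime layouts, then publish the handle table map
// to the DAC variable block (skipped when building the DAC itself).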
1856 | void PopulateHandleTableDacVars(GcDacVars* gcDacVars) |
1857 | { |
1858 | static_assert(offsetof(HandleTableMap, pBuckets) == offsetof(dac_handle_table_map, pBuckets), "handle table map DAC layout mismatch" ); |
1859 | static_assert(offsetof(HandleTableMap, pNext) == offsetof(dac_handle_table_map, pNext), "handle table map DAC layout mismatch" ); |
1860 | static_assert(offsetof(HandleTableMap, dwMaxIndex) == offsetof(dac_handle_table_map, dwMaxIndex), "handle table map DAC layout mismatch" ); |
1861 | static_assert(offsetof(HandleTableBucket, pTable) == offsetof(dac_handle_table_bucket, pTable), "handle table bucket DAC layout mismatch" ); |
1862 | static_assert(offsetof(HandleTableBucket, HandleTableIndex) == offsetof(dac_handle_table_bucket, HandleTableIndex), "handle table bucket DAC layout mismatch" ); |
1863 | static_assert(offsetof(HandleTable, uADIndex) == offsetof(dac_handle_table, uADIndex), "handle table DAC layout mismatch" ); |
1864 | |
1865 | #ifndef DACCESS_COMPILE |
1866 | gcDacVars->handle_table_map = reinterpret_cast<dac_handle_table_map*>(&g_HandleTableMap); |
#endif // !DACCESS_COMPILE
1868 | } |
1869 | |