1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | /* |
6 | * Generational GC handle manager. Table Scanning Routines. |
7 | * |
8 | * Implements support for scanning handles in the table. |
9 | * |
10 | |
11 | * |
12 | */ |
13 | |
14 | #include "common.h" |
15 | |
16 | #include "gcenv.h" |
17 | |
18 | #include "gc.h" |
19 | |
20 | #include "objecthandle.h" |
21 | #include "handletablepriv.h" |
22 | |
23 | |
24 | /**************************************************************************** |
25 | * |
26 | * DEFINITIONS FOR WRITE-BARRIER HANDLING |
27 | * |
28 | ****************************************************************************/ |
29 | /* |
30 | How the macros work: |
The handle table's generation array (TableSegmentHeader::rgGeneration) is a byte array; each byte is the generation of one clump.
However, for performance it is often treated as a uint32_t array, where one uint32_t packs the ages of 4 clumps. Operations on such
a uint32_t include:
34 | |
1. COMPUTE_CLUMP_MASK. For some GC operations we only want to scan handles in certain generations. To do that, we calculate
a mask uint32_t from the original generation uint32_t:
37 | MaskDWORD = COMPUTE_CLUMP_MASK (GenerationDWORD, BuildAgeMask(generationToScan, MaxGen)) |
so that if a byte in GenerationDWORD is less than or equal to generationToScan, the corresponding byte in MaskDWORD is non-zero,
otherwise it is zero. In addition, if a byte in GenerationDWORD is in the range [2, 3E] and generationToScan is 2, the corresponding
byte in MaskDWORD is also non-zero.
41 | |
2. AgeEphemeral. When an ephemeral GC happens, the ages of handles belonging to the condemned generation should be
incremented by 1. The operation is done by calculating a new uint32_t from the old uint32_t value:
44 | NewGenerationDWORD = COMPUTE_AGED_CLUMPS(OldGenerationDWORD, BuildAgeMask(condemnedGeneration, MaxGen)) |
so that if a byte in OldGenerationDWORD is less than or equal to condemnedGeneration, the corresponding byte in
NewGenerationDWORD is one greater than the old value; otherwise it remains unchanged.
47 | |
3. Age. Similar to AgeEphemeral, but we use a special mask when the condemned generation is max gen (2):
49 | NewGenerationDWORD = COMPUTE_AGED_CLUMPS(OldGenerationDWORD, GEN_FULLGC) |
under this operation, every byte in OldGenerationDWORD that is smaller than 3E (including bytes greater than max gen (2)) is
incremented by 1 in NewGenerationDWORD; bytes at 3E or above are left unchanged. A handle clump's age is therefore always in
[0, 3E], but from the GC's point of view, ages in [2, 3E] are all considered gen 2.
53 | |
If you change any of these algorithms, please verify them with the following program:
55 | |
56 | void Verify () |
57 | { |
58 | //the initial value of each byte is 0xff, which means there's no handle in the clump |
59 | VerifyMaskCalc (0xff, 0xff, 0xff, 0xff, 0); |
60 | VerifyMaskCalc (0xff, 0xff, 0xff, 0xff, 1); |
61 | VerifyMaskCalc (0xff, 0xff, 0xff, 0xff, 2); |
62 | |
63 | VerifyAgeEphemeralCalc (0xff, 0xff, 0xff, 0xff, 0); |
64 | VerifyAgeEphemeralCalc (0xff, 0xff, 0xff, 0xff, 1); |
65 | VerifyAgeCalc (0xff, 0xff, 0xff, 0xff); |
66 | |
    //each byte can independently take any value from 0 to 0x3f
68 | for (byte b0 = 0; b0 <= 0x3f; b0++) |
69 | { |
70 | for (byte b1 = 0; b1 <= 0x3f; b1++) |
71 | { |
72 | for (byte b2 = 0; b2 <= 0x3f; b2++) |
73 | { |
74 | for (byte b3 = 0; b3 <= 0x3f; b3++) |
75 | { |
76 | //verify we calculate mask correctly |
77 | VerifyMaskCalc (b0, b1, b2, b3, 0); |
78 | VerifyMaskCalc (b0, b1, b2, b3, 1); |
79 | VerifyMaskCalc (b0, b1, b2, b3, 2); |
80 | |
81 | //verify BlockAgeBlocksEphemeral would work correctly |
82 | VerifyAgeEphemeralCalc (b0, b1, b2, b3, 0); |
83 | VerifyAgeEphemeralCalc (b0, b1, b2, b3, 1); |
84 | |
                    //verify BlockAgeBlocks would work correctly
86 | VerifyAgeCalc (b0, b1, b2, b3); |
87 | } |
88 | } |
89 | } |
90 | } |
91 | } |
92 | |
93 | void VerifyMaskCalc (byte b0, byte b1, byte b2, byte b3, uint gennum) |
94 | { |
95 | uint genDword = (uint)(b0 | b1 << 8 | b2 << 16 | b3 << 24); |
96 | |
97 | uint maskedByGen0 = COMPUTE_CLUMP_MASK(genDword, BuildAgeMask (gennum, 2)); |
98 | byte b0_ = (byte)(maskedByGen0 & 0xff); |
99 | byte b1_ = (byte)((maskedByGen0 & 0xff00) >> 8); |
100 | byte b2_ = (byte)((maskedByGen0 & 0xff0000) >> 16); |
101 | byte b3_ = (byte)((maskedByGen0 & 0xff000000)>> 24); |
102 | |
103 | AssertGenMask (b0, b0_, gennum); |
104 | AssertGenMask (b1, b1_, gennum); |
105 | AssertGenMask (b2, b2_, gennum); |
106 | AssertGenMask (b3, b3_, gennum); |
107 | } |
108 | |
109 | void AssertGenMask (byte gen, byte mask, uint gennum) |
110 | { |
111 | //3f or ff is not a valid generation |
112 | if (gen == 0x3f || gen == 0xff) |
113 | { |
114 | assert (mask == 0); |
115 | return; |
116 | } |
    //any generation bigger than 2 is actually 2
118 | if (gen > 2) |
119 | gen = 2; |
120 | |
121 | if (gen <= gennum) |
122 | assert (mask != 0); |
123 | else |
124 | assert (mask == 0); |
125 | } |
126 | |
127 | void VerifyAgeEphemeralCalc (byte b0, byte b1, byte b2, byte b3, uint gennum) |
128 | { |
129 | uint genDword = (uint)(b0 | b1 << 8 | b2 << 16 | b3 << 24); |
130 | |
131 | uint agedClump = COMPUTE_AGED_CLUMPS(genDword, BuildAgeMask (gennum, 2)); |
132 | byte b0_ = (byte)(agedClump & 0xff); |
133 | byte b1_ = (byte)((agedClump & 0xff00) >> 8); |
134 | byte b2_ = (byte)((agedClump & 0xff0000) >> 16); |
135 | byte b3_ = (byte)((agedClump & 0xff000000) >> 24); |
136 | |
137 | AssertAgedClump (b0, b0_, gennum); |
138 | AssertAgedClump (b1, b1_, gennum); |
139 | AssertAgedClump (b2, b2_, gennum); |
140 | AssertAgedClump (b3, b3_, gennum); |
141 | } |
142 | |
143 | void AssertAgedClump (byte gen, byte agedGen, uint gennum) |
144 | { |
145 | //generation will stop growing at 0x3e |
146 | if (gen >= 0x3e) |
147 | { |
148 | assert (agedGen == gen); |
149 | return; |
150 | } |
151 | |
152 | if (gen <= gennum || (gen > 2 && gennum >= 2)) |
153 | assert (agedGen == gen + 1); |
154 | else |
155 | assert (agedGen == gen); |
156 | } |
157 | |
158 | void VerifyAgeCalc (byte b0, byte b1, byte b2, byte b3) |
159 | { |
160 | uint genDword = (uint)(b0 | b1 << 8 | b2 << 16 | b3 << 24); |
161 | |
162 | uint agedClump = COMPUTE_AGED_CLUMPS(genDword, GEN_FULLGC); |
163 | byte b0_ = (byte)(agedClump & 0xff); |
164 | byte b1_ = (byte)((agedClump & 0xff00) >> 8); |
165 | byte b2_ = (byte)((agedClump & 0xff0000) >> 16); |
166 | byte b3_ = (byte)((agedClump & 0xff000000) >> 24); |
167 | |
168 | AssertAgedClump (b0, b0_, 2); |
169 | AssertAgedClump (b1, b1_, 2); |
170 | AssertAgedClump (b2, b2_, 2); |
171 | AssertAgedClump (b3, b3_, 2); |
172 | } |
173 | */ |
174 | |
175 | #define GEN_MAX_AGE (0x3F) |
176 | #define GEN_CLAMP (0x3F3F3F3F) |
177 | #define GEN_AGE_LIMIT (0x3E3E3E3E) |
178 | #define GEN_INVALID (0xC0C0C0C0) |
179 | #define GEN_FILL (0x80808080) |
180 | #define GEN_MASK (0x40404040) |
181 | #define GEN_INC_SHIFT (6) |
182 | |
183 | #define PREFOLD_FILL_INTO_AGEMASK(msk) (1 + (msk) + (~GEN_FILL)) |
184 | |
185 | #define GEN_FULLGC PREFOLD_FILL_INTO_AGEMASK(GEN_AGE_LIMIT) |
186 | |
187 | #define MAKE_CLUMP_MASK_ADDENDS(bytes) (bytes >> GEN_INC_SHIFT) |
188 | #define APPLY_CLUMP_ADDENDS(gen, addend) (gen + addend) |
189 | |
190 | #define COMPUTE_CLUMP_MASK(gen, msk) (((gen & GEN_CLAMP) - msk) & GEN_MASK) |
191 | #define COMPUTE_CLUMP_ADDENDS(gen, msk) MAKE_CLUMP_MASK_ADDENDS(COMPUTE_CLUMP_MASK(gen, msk)) |
192 | #define COMPUTE_AGED_CLUMPS(gen, msk) APPLY_CLUMP_ADDENDS(gen, COMPUTE_CLUMP_ADDENDS(gen, msk)) |
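
/*
 * Worked example (for illustration only; the values follow directly from the
 * definitions above): suppose a block's generation uint32_t is 0x0201003F, i.e.
 * its four clumps have ages 3F (empty), 0, 1 and 2 from the least significant
 * byte up, and we are scanning generation 0, so the mask is
 * BuildAgeMask(0, 2) == 0x80808081. Then:
 *
 *     COMPUTE_CLUMP_MASK   (0x0201003F, 0x80808081) == 0x00004000  // only the gen-0 clump is selected
 *     COMPUTE_CLUMP_ADDENDS(0x0201003F, 0x80808081) == 0x00000100  // addend of 1 for that clump, 0 elsewhere
 *     COMPUTE_AGED_CLUMPS  (0x0201003F, 0x80808081) == 0x0201013F  // the gen-0 clump is aged to 1
 *
 * The empty clump (3F) and the clumps older than the scanned generation are left untouched.
 */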
193 | |
194 | /*--------------------------------------------------------------------------*/ |
195 | |
196 | |
197 | |
198 | /**************************************************************************** |
199 | * |
200 | * SUPPORT STRUCTURES FOR ASYNCHRONOUS SCANNING |
201 | * |
202 | ****************************************************************************/ |
203 | |
204 | /* |
205 | * ScanRange |
206 | * |
207 | * Specifies a range of blocks for scanning. |
208 | * |
209 | */ |
210 | struct ScanRange |
211 | { |
212 | /* |
213 | * Start Index |
214 | * |
215 | * Specifies the first block in the range. |
216 | */ |
217 | uint32_t uIndex; |
218 | |
219 | /* |
220 | * Count |
221 | * |
222 | * Specifies the number of blocks in the range. |
223 | */ |
224 | uint32_t uCount; |
225 | }; |
226 | |
227 | |
228 | /* |
229 | * ScanQNode |
230 | * |
231 | * Specifies a set of block ranges in a scan queue. |
232 | * |
233 | */ |
234 | struct ScanQNode |
235 | { |
236 | /* |
237 | * Next Node |
238 | * |
239 | * Specifies the next node in a scan list. |
240 | */ |
241 | struct ScanQNode *pNext; |
242 | |
243 | /* |
244 | * Entry Count |
245 | * |
     * Specifies how many entries in this node are valid.
247 | */ |
248 | uint32_t uEntries; |
249 | |
250 | /* |
251 | * Range Entries |
252 | * |
253 | * Each entry specifies a range of blocks to process. |
254 | */ |
255 | ScanRange rgRange[HANDLE_BLOCKS_PER_SEGMENT / 4]; |
256 | }; |
257 | |
258 | /*--------------------------------------------------------------------------*/ |
259 | |
260 | |
261 | |
262 | /**************************************************************************** |
263 | * |
264 | * MISCELLANEOUS HELPER ROUTINES AND DEFINES |
265 | * |
266 | ****************************************************************************/ |
267 | |
268 | /* |
269 | * INCLUSION_MAP_SIZE |
270 | * |
271 | * Number of elements in a type inclusion map. |
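 *
 * The extra element reserves index 0 for blocks that have no handle type assigned:
 * IsBlockIncluded below adds one to a block's (signed) type byte, so a type byte of
 * 0xFF (read back as -1) maps to slot 0, which BuildInclusionMap never sets.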
272 | * |
273 | */ |
274 | #define INCLUSION_MAP_SIZE (HANDLE_MAX_INTERNAL_TYPES + 1) |
275 | |
276 | |
277 | /* |
278 | * BuildInclusionMap |
279 | * |
280 | * Creates an inclusion map for the specified type array. |
281 | * |
282 | */ |
283 | void BuildInclusionMap(BOOL *rgTypeInclusion, const uint32_t *puType, uint32_t uTypeCount) |
284 | { |
285 | LIMITED_METHOD_CONTRACT; |
286 | |
287 | // by default, no types are scanned |
288 | ZeroMemory(rgTypeInclusion, INCLUSION_MAP_SIZE * sizeof(BOOL)); |
289 | |
290 | // add the specified types to the inclusion map |
291 | for (uint32_t u = 0; u < uTypeCount; u++) |
292 | { |
293 | // fetch a type we are supposed to scan |
294 | uint32_t uType = puType[u]; |
295 | |
296 | // hope we aren't about to trash the stack :) |
297 | _ASSERTE(uType < HANDLE_MAX_INTERNAL_TYPES); |
298 | |
299 | // add this type to the inclusion map |
300 | rgTypeInclusion[uType + 1] = TRUE; |
301 | } |
302 | } |
303 | |
304 | |
305 | /* |
306 | * IsBlockIncluded |
307 | * |
308 | * Checks a type inclusion map for the inclusion of a particular block. |
309 | * |
310 | */ |
311 | __inline BOOL IsBlockIncluded(TableSegment *pSegment, uint32_t uBlock, const BOOL *rgTypeInclusion) |
312 | { |
313 | LIMITED_METHOD_CONTRACT; |
314 | |
315 | // fetch the adjusted type for this block |
316 | uint32_t uType = (uint32_t)(((int)(signed char)pSegment->rgBlockType[uBlock]) + 1); |
317 | |
318 | // hope the adjusted type was valid |
319 | _ASSERTE(uType <= HANDLE_MAX_INTERNAL_TYPES); |
320 | |
321 | // return the inclusion value for the block's type |
322 | return rgTypeInclusion[uType]; |
323 | } |
324 | |
325 | |
326 | /* |
327 | * TypesRequireUserDataScanning |
328 | * |
 * Determines whether the set of types listed should get user data during scans.
 *
 * If ALL of the types passed have user data, then this function will enable user data support;
 * otherwise it will disable user data support.
 *
 * IN OTHER WORDS, SCANNING WITH A MIX OF USER-DATA AND NON-USER-DATA TYPES IS NOT SUPPORTED.
335 | * |
336 | */ |
337 | BOOL TypesRequireUserDataScanning(HandleTable *pTable, const uint32_t *types, uint32_t typeCount) |
338 | { |
339 | WRAPPER_NO_CONTRACT; |
340 | |
341 | // count up the number of types passed that have user data associated |
342 | uint32_t userDataCount = 0; |
343 | for (uint32_t u = 0; u < typeCount; u++) |
344 | { |
345 | if (TypeHasUserData(pTable, types[u])) |
346 | userDataCount++; |
347 | } |
348 | |
349 | // if all have user data then we can enum user data |
350 | if (userDataCount == typeCount) |
351 | return TRUE; |
352 | |
353 | // WARNING: user data is all or nothing in scanning!!! |
354 | // since we have some types which don't support user data, we can't use the user data scanning code |
355 | // this means all callbacks will get NULL for user data!!!!! |
356 | _ASSERTE(userDataCount == 0); |
357 | |
358 | // no user data |
359 | return FALSE; |
360 | } |
361 | |
362 | /* |
363 | * BuildAgeMask |
364 | * |
365 | * Builds an age mask to be used when examining/updating the write barrier. |
366 | * |
367 | */ |
368 | uint32_t BuildAgeMask(uint32_t uGen, uint32_t uMaxGen) |
369 | { |
370 | LIMITED_METHOD_CONTRACT; |
371 | |
372 | // an age mask is composed of repeated bytes containing the next older generation |
373 | |
374 | if (uGen == uMaxGen) |
375 | uGen = GEN_MAX_AGE; |
376 | |
377 | uGen++; |
378 | |
379 | // clamp the generation to the maximum age we support in our macros |
380 | if (uGen > GEN_MAX_AGE) |
381 | uGen = GEN_MAX_AGE; |
382 | |
383 | // pack up a word with age bytes and fill bytes pre-folded as well |
384 | return PREFOLD_FILL_INTO_AGEMASK(uGen | (uGen << 8) | (uGen << 16) | (uGen << 24)); |
385 | } |
386 | |
387 | /*--------------------------------------------------------------------------*/ |
388 | |
389 | |
390 | |
391 | /**************************************************************************** |
392 | * |
393 | * SYNCHRONOUS HANDLE AND BLOCK SCANNING ROUTINES |
394 | * |
395 | ****************************************************************************/ |
396 | |
397 | /* |
398 | * ARRAYSCANPROC |
399 | * |
400 | * Prototype for callbacks that implement handle array scanning logic. |
401 | * |
402 | */ |
403 | typedef void (CALLBACK *ARRAYSCANPROC)(PTR_UNCHECKED_OBJECTREF pValue, PTR_UNCHECKED_OBJECTREF pLast, |
404 | ScanCallbackInfo *pInfo, uintptr_t *pUserData); |
405 | |
406 | |
407 | /* |
408 | * ScanConsecutiveHandlesWithoutUserData |
409 | * |
410 | * Unconditionally scans a consecutive range of handles. |
411 | * |
412 | * USER DATA PASSED TO CALLBACK PROC IS ALWAYS NULL! |
413 | * |
414 | */ |
415 | void CALLBACK ScanConsecutiveHandlesWithoutUserData(PTR_UNCHECKED_OBJECTREF pValue, |
416 | PTR_UNCHECKED_OBJECTREF pLast, |
417 | ScanCallbackInfo *pInfo, |
418 | uintptr_t *) |
419 | { |
420 | WRAPPER_NO_CONTRACT; |
421 | |
422 | #ifdef _DEBUG |
423 | // update our scanning statistics |
424 | pInfo->DEBUG_HandleSlotsScanned += (int)(pLast - pValue); |
425 | #endif |
426 | |
427 | // get frequently used params into locals |
428 | HANDLESCANPROC pfnScan = pInfo->pfnScan; |
429 | uintptr_t param1 = pInfo->param1; |
430 | uintptr_t param2 = pInfo->param2; |
431 | |
432 | // scan for non-zero handles |
433 | do |
434 | { |
435 | // call the callback for any we find |
436 | if (!HndIsNullOrDestroyedHandle(*pValue)) |
437 | { |
438 | #ifdef _DEBUG |
439 | // update our scanning statistics |
440 | pInfo->DEBUG_HandlesActuallyScanned++; |
441 | #endif |
442 | |
443 | // process this handle |
444 | pfnScan(pValue, NULL, param1, param2); |
445 | } |
446 | |
447 | // on to the next handle |
448 | pValue++; |
449 | |
450 | } while (pValue < pLast); |
451 | } |
452 | |
453 | |
454 | /* |
455 | * ScanConsecutiveHandlesWithUserData |
456 | * |
457 | * Unconditionally scans a consecutive range of handles. |
458 | * |
459 | * USER DATA IS ASSUMED TO BE CONSECUTIVE! |
460 | * |
461 | */ |
462 | void CALLBACK ScanConsecutiveHandlesWithUserData(PTR_UNCHECKED_OBJECTREF pValue, |
463 | PTR_UNCHECKED_OBJECTREF pLast, |
464 | ScanCallbackInfo *pInfo, |
465 | uintptr_t *pUserData) |
466 | { |
467 | WRAPPER_NO_CONTRACT; |
468 | |
469 | #ifdef _DEBUG |
470 | // this function will crash if it is passed bad extra info |
471 | _ASSERTE(pUserData); |
472 | |
473 | // update our scanning statistics |
474 | pInfo->DEBUG_HandleSlotsScanned += (int)(pLast - pValue); |
475 | #endif |
476 | |
477 | // get frequently used params into locals |
478 | HANDLESCANPROC pfnScan = pInfo->pfnScan; |
479 | uintptr_t param1 = pInfo->param1; |
480 | uintptr_t param2 = pInfo->param2; |
481 | |
482 | // scan for non-zero handles |
483 | do |
484 | { |
485 | // call the callback for any we find |
486 | if (!HndIsNullOrDestroyedHandle(*pValue)) |
487 | { |
488 | #ifdef _DEBUG |
489 | // update our scanning statistics |
490 | pInfo->DEBUG_HandlesActuallyScanned++; |
491 | #endif |
492 | |
493 | // process this handle |
494 | pfnScan(pValue, pUserData, param1, param2); |
495 | } |
496 | |
497 | // on to the next handle |
498 | pValue++; |
499 | pUserData++; |
500 | |
501 | } while (pValue < pLast); |
502 | } |
503 | |
504 | /* |
505 | * BlockAgeBlocks |
506 | * |
507 | * Ages all clumps in a range of consecutive blocks. |
508 | * |
509 | */ |
510 | void CALLBACK BlockAgeBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
511 | { |
512 | LIMITED_METHOD_CONTRACT; |
513 | UNREFERENCED_PARAMETER(pInfo); |
514 | |
515 | #ifdef DACCESS_COMPILE |
516 | UNREFERENCED_PARAMETER(pSegment); |
517 | UNREFERENCED_PARAMETER(uBlock); |
518 | UNREFERENCED_PARAMETER(uCount); |
519 | #else |
520 | // set up to update the specified blocks |
521 | uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uBlock; |
522 | uint32_t *pdwGenLast = pdwGen + uCount; |
523 | |
524 | // loop over all the blocks, aging their clumps as we go |
525 | do |
526 | { |
527 | // compute and store the new ages in parallel |
528 | *pdwGen = COMPUTE_AGED_CLUMPS(*pdwGen, GEN_FULLGC); |
529 | |
530 | } while (++pdwGen < pdwGenLast); |
531 | #endif |
532 | } |
533 | |
534 | /* |
535 | * BlockScanBlocksWithoutUserData |
536 | * |
537 | * Calls the specified callback once for each handle in a range of blocks, |
538 | * optionally aging the corresponding generation clumps. |
539 | * |
540 | */ |
541 | void CALLBACK BlockScanBlocksWithoutUserData(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
542 | { |
543 | LIMITED_METHOD_CONTRACT; |
544 | |
545 | #ifndef DACCESS_COMPILE |
546 | // get the first and limit handles for these blocks |
547 | _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK); |
548 | _UNCHECKED_OBJECTREF *pLast = pValue + (uCount * HANDLE_HANDLES_PER_BLOCK); |
549 | #else |
550 | PTR_UNCHECKED_OBJECTREF pValue = dac_cast<PTR_UNCHECKED_OBJECTREF>(PTR_HOST_MEMBER_TADDR(TableSegment, pSegment, rgValue)) |
551 | + (uBlock * HANDLE_HANDLES_PER_BLOCK); |
552 | PTR_UNCHECKED_OBJECTREF pLast = pValue + (uCount * HANDLE_HANDLES_PER_BLOCK); |
553 | #endif |
554 | |
555 | // scan the specified handles |
556 | ScanConsecutiveHandlesWithoutUserData(pValue, pLast, pInfo, NULL); |
557 | |
558 | // optionally update the clump generations for these blocks too |
559 | if (pInfo->uFlags & HNDGCF_AGE) |
560 | BlockAgeBlocks(pSegment, uBlock, uCount, pInfo); |
561 | |
562 | #ifdef _DEBUG |
563 | // update our scanning statistics |
564 | pInfo->DEBUG_BlocksScannedNonTrivially += uCount; |
565 | pInfo->DEBUG_BlocksScanned += uCount; |
566 | #endif |
567 | } |
568 | |
569 | |
570 | /* |
571 | * BlockScanBlocksWithUserData |
572 | * |
573 | * Calls the specified callback once for each handle in a range of blocks, |
574 | * optionally aging the corresponding generation clumps. |
575 | * |
576 | */ |
577 | void CALLBACK BlockScanBlocksWithUserData(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
578 | { |
579 | LIMITED_METHOD_CONTRACT; |
580 | |
581 | // iterate individual blocks scanning with user data |
582 | for (uint32_t u = 0; u < uCount; u++) |
583 | { |
584 | // compute the current block |
585 | uint32_t uCur = (u + uBlock); |
586 | |
587 | // fetch the user data for this block |
588 | uintptr_t *pUserData = BlockFetchUserDataPointer(PTR__TableSegmentHeader(pSegment), uCur, TRUE); |
589 | |
590 | #ifndef DACCESS_COMPILE |
591 | // get the first and limit handles for these blocks |
592 | _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uCur * HANDLE_HANDLES_PER_BLOCK); |
593 | _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK; |
594 | #else |
595 | PTR_UNCHECKED_OBJECTREF pValue = dac_cast<PTR_UNCHECKED_OBJECTREF>(PTR_HOST_MEMBER_TADDR(TableSegment, pSegment, rgValue)) |
596 | + (uCur * HANDLE_HANDLES_PER_BLOCK); |
597 | PTR_UNCHECKED_OBJECTREF pLast = pValue + HANDLE_HANDLES_PER_BLOCK; |
598 | #endif |
599 | |
600 | // scan the handles in this block |
601 | ScanConsecutiveHandlesWithUserData(pValue, pLast, pInfo, pUserData); |
602 | } |
603 | |
604 | // optionally update the clump generations for these blocks too |
605 | if (pInfo->uFlags & HNDGCF_AGE) |
606 | BlockAgeBlocks(pSegment, uBlock, uCount, pInfo); |
607 | |
608 | #ifdef _DEBUG |
609 | // update our scanning statistics |
610 | pInfo->DEBUG_BlocksScannedNonTrivially += uCount; |
611 | pInfo->DEBUG_BlocksScanned += uCount; |
612 | #endif |
613 | } |
614 | |
615 | |
616 | /* |
617 | * BlockScanBlocksEphemeralWorker |
618 | * |
619 | * Calls the specified callback once for each handle in any clump |
620 | * identified by the clump mask in the specified block. |
621 | * |
622 | */ |
623 | void BlockScanBlocksEphemeralWorker(uint32_t *pdwGen, uint32_t dwClumpMask, ScanCallbackInfo *pInfo) |
624 | { |
625 | WRAPPER_NO_CONTRACT; |
626 | |
627 | // |
628 | // OPTIMIZATION: Since we expect to call this worker fairly rarely compared to |
629 | // the number of times we pass through the outer loop, this function intentionally |
630 | // does not take pSegment as a param. |
631 | // |
632 | // We do this so that the compiler won't try to keep pSegment in a register during |
633 | // the outer loop, leaving more registers for the common codepath. |
634 | // |
635 | // You might wonder why this is an issue considering how few locals we have in |
636 | // BlockScanBlocksEphemeral. For some reason the x86 compiler doesn't like to use |
637 | // all the registers during that loop, so a little coaxing was necessary to get |
638 | // the right output. |
639 | // |
640 | |
641 | // fetch the table segment we are working in |
642 | PTR_TableSegment pSegment = pInfo->pCurrentSegment; |
643 | |
644 | // if we should age the clumps then do so now (before we trash dwClumpMask) |
645 | if (pInfo->uFlags & HNDGCF_AGE) |
646 | *pdwGen = APPLY_CLUMP_ADDENDS(*pdwGen, MAKE_CLUMP_MASK_ADDENDS(dwClumpMask)); |
647 | |
648 | // compute the index of the first clump in the block |
649 | uint32_t uClump = (uint32_t)((uint8_t *)pdwGen - pSegment->rgGeneration); |
650 | |
651 | #ifndef DACCESS_COMPILE |
652 | // compute the first handle in the first clump of this block |
653 | _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uClump * HANDLE_HANDLES_PER_CLUMP); |
654 | #else |
655 | PTR_UNCHECKED_OBJECTREF pValue = dac_cast<PTR_UNCHECKED_OBJECTREF>(PTR_HOST_MEMBER_TADDR(TableSegment, pSegment, rgValue)) |
656 | + (uClump * HANDLE_HANDLES_PER_CLUMP); |
657 | #endif |
658 | |
659 | // some scans require us to report per-handle extra info - assume this one doesn't |
660 | ARRAYSCANPROC pfnScanHandles = ScanConsecutiveHandlesWithoutUserData; |
661 | uintptr_t *pUserData = NULL; |
662 | |
663 | // do we need to pass user data to the callback? |
664 | if (pInfo->fEnumUserData) |
665 | { |
666 | // scan with user data enabled |
667 | pfnScanHandles = ScanConsecutiveHandlesWithUserData; |
668 | |
669 | // get the first user data slot for this block |
670 | pUserData = BlockFetchUserDataPointer(PTR__TableSegmentHeader(pSegment), (uClump / HANDLE_CLUMPS_PER_BLOCK), TRUE); |
671 | } |
672 | |
673 | // loop over the clumps, scanning those that are identified by the mask |
674 | do |
675 | { |
676 | // compute the last handle in this clump |
677 | PTR_UNCHECKED_OBJECTREF pLast = pValue + HANDLE_HANDLES_PER_CLUMP; |
678 | |
679 | // if this clump should be scanned then scan it |
680 | if (dwClumpMask & GEN_CLUMP_0_MASK) |
681 | pfnScanHandles(pValue, pLast, pInfo, pUserData); |
682 | |
683 | // skip to the next clump |
684 | dwClumpMask = NEXT_CLUMP_IN_MASK(dwClumpMask); |
685 | pValue = pLast; |
686 | pUserData += HANDLE_HANDLES_PER_CLUMP; |
687 | |
688 | } while (dwClumpMask); |
689 | |
690 | #ifdef _DEBUG |
691 | // update our scanning statistics |
692 | pInfo->DEBUG_BlocksScannedNonTrivially++; |
693 | #endif |
694 | } |
695 | |
696 | |
697 | /* |
698 | * BlockScanBlocksEphemeral |
699 | * |
700 | * Calls the specified callback once for each handle from the specified |
701 | * generation in a block. |
702 | * |
703 | */ |
704 | void CALLBACK BlockScanBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
705 | { |
706 | WRAPPER_NO_CONTRACT; |
707 | |
708 | // get frequently used params into locals |
709 | uint32_t dwAgeMask = pInfo->dwAgeMask; |
710 | |
711 | // set up to update the specified blocks |
712 | uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uBlock; |
713 | uint32_t *pdwGenLast = pdwGen + uCount; |
714 | |
    // loop over all the blocks, checking for eligible clumps as we go
716 | do |
717 | { |
        // determine if any clumps in this block are eligible
719 | uint32_t dwClumpMask = COMPUTE_CLUMP_MASK(*pdwGen, dwAgeMask); |
720 | |
721 | // if there are any clumps to scan then scan them now |
722 | if (dwClumpMask) |
723 | { |
724 | // ok we need to scan some parts of this block |
725 | // |
726 | // OPTIMIZATION: Since we expect to call the worker fairly rarely compared |
727 | // to the number of times we pass through the loop, the function below |
728 | // intentionally does not take pSegment as a param. |
729 | // |
730 | // We do this so that the compiler won't try to keep pSegment in a register |
731 | // during our loop, leaving more registers for the common codepath. |
732 | // |
733 | // You might wonder why this is an issue considering how few locals we have |
734 | // here. For some reason the x86 compiler doesn't like to use all the |
735 | // registers available during this loop and instead was hitting the stack |
736 | // repeatedly, so a little coaxing was necessary to get the right output. |
737 | // |
738 | BlockScanBlocksEphemeralWorker(pdwGen, dwClumpMask, pInfo); |
739 | } |
740 | |
741 | // on to the next block's generation info |
742 | pdwGen++; |
743 | |
744 | } while (pdwGen < pdwGenLast); |
745 | |
746 | #ifdef _DEBUG |
747 | // update our scanning statistics |
748 | pInfo->DEBUG_BlocksScanned += uCount; |
749 | #endif |
750 | } |
751 | |
752 | #ifndef DACCESS_COMPILE |
753 | /* |
754 | * BlockAgeBlocksEphemeral |
755 | * |
 * Ages all clumps at or below the specified generation in a range of consecutive blocks.
757 | * |
758 | */ |
759 | void CALLBACK BlockAgeBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
760 | { |
761 | LIMITED_METHOD_CONTRACT; |
762 | |
763 | // get frequently used params into locals |
764 | uint32_t dwAgeMask = pInfo->dwAgeMask; |
765 | |
766 | // set up to update the specified blocks |
767 | uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uBlock; |
768 | uint32_t *pdwGenLast = pdwGen + uCount; |
769 | |
770 | // loop over all the blocks, aging their clumps as we go |
771 | do |
772 | { |
773 | // compute and store the new ages in parallel |
774 | *pdwGen = COMPUTE_AGED_CLUMPS(*pdwGen, dwAgeMask); |
775 | |
776 | } while (++pdwGen < pdwGenLast); |
777 | } |
778 | |
779 | /* |
780 | * BlockResetAgeMapForBlocksWorker |
781 | * |
782 | * Figures out the minimum age of the objects referred to by the handles in any clump |
783 | * identified by the clump mask in the specified block. |
784 | * |
785 | */ |
786 | void BlockResetAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, ScanCallbackInfo *pInfo) |
787 | { |
788 | STATIC_CONTRACT_NOTHROW; |
789 | STATIC_CONTRACT_GC_NOTRIGGER; |
790 | STATIC_CONTRACT_SO_TOLERANT; |
791 | STATIC_CONTRACT_MODE_COOPERATIVE; |
792 | |
793 | // fetch the table segment we are working in |
794 | TableSegment *pSegment = pInfo->pCurrentSegment; |
795 | |
796 | // compute the index of the first clump in the block |
797 | uint32_t uClump = (uint32_t)((uint8_t *)pdwGen - pSegment->rgGeneration); |
798 | |
799 | // compute the first handle in the first clump of this block |
800 | _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uClump * HANDLE_HANDLES_PER_CLUMP); |
801 | |
802 | // loop over the clumps, scanning those that are identified by the mask |
803 | do |
804 | { |
805 | // compute the last handle in this clump |
806 | _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_CLUMP; |
807 | |
808 | // if this clump should be scanned then scan it |
809 | if (dwClumpMask & GEN_CLUMP_0_MASK) |
810 | { |
811 | // for each clump, determine the minimum age of the objects pointed at. |
812 | int minAge = GEN_MAX_AGE; |
813 | for ( ; pValue < pLast; pValue++) |
814 | { |
815 | if (!HndIsNullOrDestroyedHandle(*pValue)) |
816 | { |
817 | int thisAge = g_theGCHeap->WhichGeneration(*pValue); |
818 | if (minAge > thisAge) |
819 | minAge = thisAge; |
820 | |
821 | GCToEEInterface::WalkAsyncPinned(*pValue, &minAge, |
822 | [](Object*, Object* to, void* ctx) |
823 | { |
824 | int* minAge = reinterpret_cast<int*>(ctx); |
825 | int generation = g_theGCHeap->WhichGeneration(to); |
826 | if (*minAge > generation) |
827 | { |
828 | *minAge = generation; |
829 | } |
830 | }); |
831 | } |
832 | } |
833 | _ASSERTE(FitsInU1(minAge)); |
834 | ((uint8_t *)pSegment->rgGeneration)[uClump] = static_cast<uint8_t>(minAge); |
835 | } |
836 | // skip to the next clump |
837 | dwClumpMask = NEXT_CLUMP_IN_MASK(dwClumpMask); |
838 | pValue = pLast; |
839 | uClump++; |
840 | } while (dwClumpMask); |
841 | } |
842 | |
843 | |
844 | /* |
845 | * BlockResetAgeMapForBlocks |
846 | * |
 * Resets the age maps for a range of blocks. Called in the case of demotion. Even in this case,
 * most handles refer to objects that don't get demoted and therefore still need to be aged.
849 | * |
850 | */ |
851 | void CALLBACK BlockResetAgeMapForBlocks(TableSegment *pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
852 | { |
853 | WRAPPER_NO_CONTRACT; |
854 | |
855 | #if 0 |
856 | // zero the age map for the specified range of blocks |
857 | ZeroMemory((uint32_t *)pSegment->rgGeneration + uBlock, uCount * sizeof(uint32_t)); |
858 | #else |
    // Actually, we need to be more sophisticated than the code above - there are scenarios
    // where there is demotion in almost every GC cycle, so none of the handles would get
    // aged appropriately.
862 | |
863 | // get frequently used params into locals |
864 | uint32_t dwAgeMask = pInfo->dwAgeMask; |
865 | |
866 | // set up to update the specified blocks |
867 | uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uBlock; |
868 | uint32_t *pdwGenLast = pdwGen + uCount; |
869 | |
870 | // loop over all the blocks, checking for eligible clumps as we go |
871 | do |
872 | { |
873 | // determine if any clumps in this block are eligible |
874 | uint32_t dwClumpMask = COMPUTE_CLUMP_MASK(*pdwGen, dwAgeMask); |
875 | |
876 | // if there are any clumps to scan then scan them now |
877 | if (dwClumpMask) |
878 | { |
879 | // ok we need to scan some parts of this block |
880 | // This code is a variation of the code in BlockScanBlocksEphemeral, |
881 | // so the OPTIMIZATION comment there applies here as well |
882 | BlockResetAgeMapForBlocksWorker(pdwGen, dwClumpMask, pInfo); |
883 | } |
884 | |
885 | // on to the next block's generation info |
886 | pdwGen++; |
887 | |
888 | } while (pdwGen < pdwGenLast); |
889 | #endif |
890 | } |
891 | |
892 | static void VerifyObject(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj) |
893 | { |
894 | #if defined(FEATURE_REDHAWK) || defined(BUILD_AS_STANDALONE) |
895 | UNREFERENCED_PARAMETER(from); |
896 | MethodTable* pMT = (MethodTable*)(obj->GetGCSafeMethodTable()); |
897 | pMT->SanityCheck(); |
898 | #else |
899 | obj->ValidateHeap(from); |
#endif // FEATURE_REDHAWK || BUILD_AS_STANDALONE
901 | } |
902 | |
903 | static void VerifyObjectAndAge(_UNCHECKED_OBJECTREF from, _UNCHECKED_OBJECTREF obj, uint8_t minAge) |
904 | { |
905 | VerifyObject(from, obj); |
906 | |
907 | int thisAge = g_theGCHeap->WhichGeneration(obj); |
908 | |
909 | //debugging code |
910 | //if (minAge > thisAge && thisAge < g_theGCHeap->GetMaxGeneration()) |
911 | //{ |
912 | // if ((*pValue) == obj) |
913 | // printf("Handle (age %u) %p -> %p (age %u)", minAge, pValue, obj, thisAge); |
914 | // else |
915 | // printf("Handle (age %u) %p -> %p -> %p (age %u)", minAge, pValue, from, obj, thisAge); |
916 | |
917 | // // for test programs - if the object is a string, print it |
918 | // if (obj->GetGCSafeMethodTable() == g_pStringClass) |
919 | // { |
920 | // printf("'%ls'\n", ((StringObject *)obj)->GetBuffer()); |
921 | // } |
922 | // else |
923 | // { |
924 | // printf("\n"); |
925 | // } |
926 | //} |
927 | |
928 | if (minAge >= GEN_MAX_AGE || (minAge > thisAge && thisAge < static_cast<int>(g_theGCHeap->GetMaxGeneration()))) |
929 | { |
930 | _ASSERTE(!"Fatal Error in HandleTable." ); |
931 | GCToEEInterface::HandleFatalError(COR_E_EXECUTIONENGINE); |
932 | } |
933 | } |
934 | |
935 | /* |
936 | * BlockVerifyAgeMapForBlocksWorker |
937 | * |
 * Verifies the minimum age of the objects referred to by the handles in any clump
939 | * identified by the clump mask in the specified block. |
940 | * Also validates the objects themselves. |
941 | * |
942 | */ |
943 | void BlockVerifyAgeMapForBlocksWorker(uint32_t *pdwGen, uint32_t dwClumpMask, ScanCallbackInfo *pInfo, uint32_t uType) |
944 | { |
945 | WRAPPER_NO_CONTRACT; |
946 | |
947 | // fetch the table segment we are working in |
948 | TableSegment *pSegment = pInfo->pCurrentSegment; |
949 | |
950 | // compute the index of the first clump in the block |
951 | uint32_t uClump = (uint32_t)((uint8_t *)pdwGen - pSegment->rgGeneration); |
952 | |
953 | // compute the first handle in the first clump of this block |
954 | _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uClump * HANDLE_HANDLES_PER_CLUMP); |
955 | |
956 | // loop over the clumps, scanning those that are identified by the mask |
957 | do |
958 | { |
959 | // compute the last handle in this clump |
960 | _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_CLUMP; |
961 | |
962 | // if this clump should be scanned then scan it |
963 | if (dwClumpMask & GEN_CLUMP_0_MASK) |
964 | { |
965 | // for each clump, check whether any object is younger than the age indicated by the clump |
966 | uint8_t minAge = ((uint8_t *)pSegment->rgGeneration)[uClump]; |
967 | for ( ; pValue < pLast; pValue++) |
968 | { |
969 | if (!HndIsNullOrDestroyedHandle(*pValue)) |
970 | { |
971 | VerifyObjectAndAge((*pValue), (*pValue), minAge); |
972 | GCToEEInterface::WalkAsyncPinned(*pValue, &minAge, |
973 | [](Object* from, Object* object, void* age) |
974 | { |
975 | uint8_t* minAge = reinterpret_cast<uint8_t*>(age); |
976 | VerifyObjectAndAge(from, object, *minAge); |
977 | }); |
978 | |
979 | if (uType == HNDTYPE_DEPENDENT) |
980 | { |
981 | PTR_uintptr_t pUserData = HandleQuickFetchUserDataPointer((OBJECTHANDLE)pValue); |
982 | |
                        // if there is user data then verify the secondary object it references
984 | if (pUserData) |
985 | { |
986 | _UNCHECKED_OBJECTREF pSecondary = (_UNCHECKED_OBJECTREF)(*pUserData); |
987 | if (pSecondary) |
988 | { |
989 | VerifyObject(pSecondary, pSecondary); |
990 | } |
991 | } |
992 | } |
993 | } |
994 | } |
995 | } |
996 | // else |
997 | // printf("skipping clump with age %x\n", ((uint8_t *)pSegment->rgGeneration)[uClump]); |
998 | |
999 | // skip to the next clump |
1000 | dwClumpMask = NEXT_CLUMP_IN_MASK(dwClumpMask); |
1001 | pValue = pLast; |
1002 | uClump++; |
1003 | } while (dwClumpMask); |
1004 | } |
1005 | |
1006 | /* |
1007 | * BlockVerifyAgeMapForBlocks |
1008 | * |
 * Verifies the age maps for a range of blocks: checks that handles do not refer to objects
 * younger than the age recorded for their clump, and validates the objects themselves.
1011 | * |
1012 | */ |
1013 | void CALLBACK BlockVerifyAgeMapForBlocks(TableSegment *pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo) |
1014 | { |
1015 | WRAPPER_NO_CONTRACT; |
1016 | |
1017 | for (uint32_t u = 0; u < uCount; u++) |
1018 | { |
1019 | uint32_t uCur = (u + uBlock); |
1020 | |
1021 | uint32_t *pdwGen = (uint32_t *)pSegment->rgGeneration + uCur; |
1022 | |
1023 | uint32_t uType = pSegment->rgBlockType[uCur]; |
1024 | |
1025 | BlockVerifyAgeMapForBlocksWorker(pdwGen, 0xFFFFFFFF, pInfo, uType); |
1026 | } |
1027 | } |
1028 | |
1029 | /* |
1030 | * BlockLockBlocks |
1031 | * |
1032 | * Locks all blocks in the specified range. |
1033 | * |
1034 | */ |
1035 | void CALLBACK BlockLockBlocks(TableSegment *pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *) |
1036 | { |
1037 | WRAPPER_NO_CONTRACT; |
1038 | |
1039 | // loop over the blocks in the specified range and lock them |
1040 | for (uCount += uBlock; uBlock < uCount; uBlock++) |
1041 | BlockLock(pSegment, uBlock); |
1042 | } |
1043 | |
1044 | |
1045 | /* |
1046 | * BlockUnlockBlocks |
1047 | * |
1048 | * Unlocks all blocks in the specified range. |
1049 | * |
1050 | */ |
1051 | void CALLBACK BlockUnlockBlocks(TableSegment *pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *) |
1052 | { |
1053 | WRAPPER_NO_CONTRACT; |
1054 | |
1055 | // loop over the blocks in the specified range and unlock them |
1056 | for (uCount += uBlock; uBlock < uCount; uBlock++) |
1057 | BlockUnlock(pSegment, uBlock); |
1058 | } |
1059 | #endif // !DACCESS_COMPILE |
1060 | |
1061 | /* |
1062 | * BlockQueueBlocksForAsyncScan |
1063 | * |
1064 | * Queues the specified blocks to be scanned asynchronously. |
1065 | * |
1066 | */ |
1067 | void CALLBACK BlockQueueBlocksForAsyncScan(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *) |
1068 | { |
1069 | CONTRACTL |
1070 | { |
1071 | NOTHROW; |
1072 | WRAPPER(GC_TRIGGERS); |
1073 | } |
1074 | CONTRACTL_END; |
1075 | |
1076 | // fetch our async scan information |
1077 | AsyncScanInfo *pAsyncInfo = pSegment->pHandleTable->pAsyncScanInfo; |
1078 | |
1079 | // sanity |
1080 | _ASSERTE(pAsyncInfo); |
1081 | |
1082 | // fetch the current queue tail |
1083 | ScanQNode *pQNode = pAsyncInfo->pQueueTail; |
1084 | |
1085 | // did we get a tail? |
1086 | if (pQNode) |
1087 | { |
1088 | // we got an existing tail - is the tail node full already? |
1089 | if (pQNode->uEntries >= _countof(pQNode->rgRange)) |
1090 | { |
1091 | // the node is full - is there another node in the queue? |
1092 | if (!pQNode->pNext) |
1093 | { |
1094 | // no more nodes - allocate a new one |
1095 | ScanQNode *pQNodeT = new (nothrow) ScanQNode(); |
1096 | |
1097 | // did it succeed? |
1098 | if (!pQNodeT) |
1099 | { |
1100 | // |
1101 | // We couldn't allocate another queue node. |
1102 | // |
1103 | // THIS IS NOT FATAL IF ASYNCHRONOUS SCANNING IS BEING USED PROPERLY |
1104 | // |
1105 | // The reason we can survive this is that asynchronous scans are not |
1106 | // guaranteed to enumerate all handles anyway. Since the table can |
1107 | // change while the lock is released, the caller may assume only that |
1108 | // asynchronous scanning will enumerate a reasonably high percentage |
1109 | // of the handles requested, most of the time. |
1110 | // |
1111 | // The typical use of an async scan is to process as many handles as |
1112 | // possible asynchronously, so as to reduce the amount of time spent |
1113 | // in the inevitable synchronous scan that follows. |
1114 | // |
1115 | // As a practical example, the Concurrent Mark phase of garbage |
1116 | // collection marks as many objects as possible asynchronously, and |
1117 | // subsequently performs a normal, synchronous mark to catch the |
1118 | // stragglers. Since most of the reachable objects in the heap are |
1119 | // already marked at this point, the synchronous scan ends up doing |
1120 | // very little work. |
1121 | // |
1122 | // So the moral of the story is that yes, we happily drop some of |
1123 | // your blocks on the floor in this out of memory case, and that's |
1124 | // BY DESIGN. |
1125 | // |
1126 | LOG((LF_GC, LL_WARNING, "WARNING: Out of memory queueing for async scan. Some blocks skipped.\n" )); |
1127 | return; |
1128 | } |
1129 | |
1130 | memset (pQNodeT, 0, sizeof(ScanQNode)); |
1131 | |
1132 | // link the new node into the queue |
1133 | pQNode->pNext = pQNodeT; |
1134 | } |
1135 | |
1136 | // either way, use the next node in the queue |
1137 | pQNode = pQNode->pNext; |
1138 | } |
1139 | } |
1140 | else |
1141 | { |
1142 | // no tail - this is a brand new queue; start the tail at the head node |
1143 | pQNode = pAsyncInfo->pScanQueue; |
1144 | } |
1145 | |
1146 | // we will be using the last slot after the existing entries |
1147 | uint32_t uSlot = pQNode->uEntries; |
1148 | |
1149 | // fetch the slot where we will be storing the new block range |
1150 | ScanRange *pNewRange = pQNode->rgRange + uSlot; |
1151 | |
1152 | // update the entry count in the node |
1153 | pQNode->uEntries = uSlot + 1; |
1154 | |
1155 | // fill in the new slot with the block range info |
1156 | pNewRange->uIndex = uBlock; |
1157 | pNewRange->uCount = uCount; |
1158 | |
    // remember the node we stored into as the new queue tail
1160 | pAsyncInfo->pQueueTail = pQNode; |
1161 | } |
1162 | |
1163 | /*--------------------------------------------------------------------------*/ |
1164 | |
1165 | |
1166 | |
1167 | /**************************************************************************** |
1168 | * |
1169 | * ASYNCHRONOUS SCANNING WORKERS AND CALLBACKS |
1170 | * |
1171 | ****************************************************************************/ |
1172 | |
1173 | /* |
1174 | * QNODESCANPROC |
1175 | * |
1176 | * Prototype for callbacks that implement per ScanQNode scanning logic. |
1177 | * |
1178 | */ |
1179 | typedef void (CALLBACK *QNODESCANPROC)(AsyncScanInfo *pAsyncInfo, ScanQNode *pQNode, uintptr_t lParam); |
1180 | |
1181 | |
1182 | /* |
1183 | * ProcessScanQueue |
1184 | * |
1185 | * Calls the specified handler once for each node in a scan queue. |
1186 | * |
1187 | */ |
1188 | void ProcessScanQueue(AsyncScanInfo *pAsyncInfo, QNODESCANPROC pfnNodeHandler, uintptr_t lParam, BOOL fCountEmptyQNodes) |
1189 | { |
1190 | WRAPPER_NO_CONTRACT; |
1191 | |
1192 | if (pAsyncInfo->pQueueTail == NULL && fCountEmptyQNodes == FALSE) |
1193 | return; |
1194 | |
    // walk the scan queue, calling the handler for each node in turn
1196 | ScanQNode *pQNode = pAsyncInfo->pScanQueue; |
1197 | while (pQNode) |
1198 | { |
1199 | // remember the next node |
1200 | ScanQNode *pNext = pQNode->pNext; |
1201 | |
1202 | // call the handler for the current node and then advance to the next |
1203 | pfnNodeHandler(pAsyncInfo, pQNode, lParam); |
1204 | pQNode = pNext; |
1205 | } |
1206 | } |
1207 | |
1208 | |
1209 | /* |
1210 | * ProcessScanQNode |
1211 | * |
1212 | * Calls the specified block handler once for each range of blocks in a ScanQNode. |
1213 | * |
1214 | */ |
1215 | void CALLBACK ProcessScanQNode(AsyncScanInfo *pAsyncInfo, ScanQNode *pQNode, uintptr_t lParam) |
1216 | { |
1217 | WRAPPER_NO_CONTRACT; |
1218 | |
1219 | // get the block handler from our lParam |
1220 | BLOCKSCANPROC pfnBlockHandler = (BLOCKSCANPROC)lParam; |
1221 | |
1222 | // fetch the params we will be passing to the handler |
1223 | ScanCallbackInfo *pCallbackInfo = pAsyncInfo->pCallbackInfo; |
1224 | PTR_TableSegment pSegment = pCallbackInfo->pCurrentSegment; |
1225 | |
1226 | // set up to iterate the ranges in the queue node |
1227 | ScanRange *pRange = pQNode->rgRange; |
1228 | ScanRange *pRangeLast = pRange + pQNode->uEntries; |
1229 | |
1230 | // loop over all the ranges, calling the block handler for each one |
    while (pRange < pRangeLast)
    {
1232 | // call the block handler with the current block range |
1233 | pfnBlockHandler(pSegment, pRange->uIndex, pRange->uCount, pCallbackInfo); |
1234 | |
1235 | // advance to the next range |
1236 | pRange++; |
1237 | |
1238 | } |
1239 | } |
1240 | |
1241 | #ifndef DACCESS_COMPILE |
1242 | /* |
1243 | * UnlockAndForgetQueuedBlocks |
1244 | * |
1245 | * Unlocks all blocks referenced in the specified node and marks the node as empty. |
1246 | * |
1247 | */ |
1248 | void CALLBACK UnlockAndForgetQueuedBlocks(AsyncScanInfo *pAsyncInfo, ScanQNode *pQNode, uintptr_t) |
1249 | { |
1250 | WRAPPER_NO_CONTRACT; |
1251 | |
1252 | // unlock the blocks named in this node |
1253 | ProcessScanQNode(pAsyncInfo, pQNode, (uintptr_t)BlockUnlockBlocks); |
1254 | |
1255 | // reset the node so it looks empty |
1256 | pQNode->uEntries = 0; |
1257 | } |
1258 | #endif |
1259 | |
1260 | /* |
1261 | * FreeScanQNode |
1262 | * |
1263 | * Frees the specified ScanQNode |
1264 | * |
1265 | */ |
1266 | void CALLBACK FreeScanQNode(AsyncScanInfo *, ScanQNode *pQNode, uintptr_t) |
1267 | { |
1268 | LIMITED_METHOD_CONTRACT; |
1269 | |
1270 | // free the node's memory |
1271 | delete pQNode; |
1272 | } |
1273 | |
1274 | |
1275 | /* |
1276 | * xxxTableScanQueuedBlocksAsync |
1277 | * |
 * Performs an asynchronous scan of the queued blocks for the specified segment.
1279 | * |
1280 | * N.B. THIS FUNCTION LEAVES THE TABLE LOCK WHILE SCANNING. |
1281 | * |
1282 | */ |
1283 | void xxxTableScanQueuedBlocksAsync(PTR_HandleTable pTable, PTR_TableSegment pSegment, CrstHolderWithState *pCrstHolder) |
1284 | { |
1285 | WRAPPER_NO_CONTRACT; |
1286 | |
1287 | //------------------------------------------------------------------------------- |
1288 | // PRE-SCAN PREPARATION |
1289 | |
1290 | // fetch our table's async and sync scanning info |
1291 | AsyncScanInfo *pAsyncInfo = pTable->pAsyncScanInfo; |
1292 | ScanCallbackInfo *pCallbackInfo = pAsyncInfo->pCallbackInfo; |
1293 | |
1294 | // make a note that we are now processing this segment |
1295 | pCallbackInfo->pCurrentSegment = pSegment; |
1296 | |
1297 | #ifndef DACCESS_COMPILE |
1298 | // loop through and lock down all the blocks referenced by the queue |
1299 | ProcessScanQueue(pAsyncInfo, ProcessScanQNode, (uintptr_t)BlockLockBlocks, FALSE); |
1300 | #endif |
1301 | |
1302 | //------------------------------------------------------------------------------- |
1303 | // ASYNCHRONOUS SCANNING OF QUEUED BLOCKS |
1304 | // |
1305 | |
1306 | // leave the table lock |
1307 | _ASSERTE(pCrstHolder->GetValue()==(&pTable->Lock)); |
1308 | pCrstHolder->Release(); |
1309 | |
1310 | // sanity - this isn't a very asynchronous scan if we don't actually leave |
1311 | _ASSERTE(!pTable->Lock.OwnedByCurrentThread()); |
1312 | |
1313 | // perform the actual scanning of the specified blocks |
1314 | ProcessScanQueue(pAsyncInfo, ProcessScanQNode, (uintptr_t)pAsyncInfo->pfnBlockHandler, FALSE); |
1315 | |
1316 | // re-enter the table lock |
1317 | pCrstHolder->Acquire(); |
1318 | |
1319 | |
1320 | //------------------------------------------------------------------------------- |
1321 | // POST-SCAN CLEANUP |
1322 | // |
1323 | |
1324 | #ifndef DACCESS_COMPILE |
1325 | // loop through, unlock all the blocks we had locked, and reset the queue nodes |
1326 | ProcessScanQueue(pAsyncInfo, UnlockAndForgetQueuedBlocks, (uintptr_t)NULL, FALSE); |
1327 | #endif |
1328 | |
1329 | // we are done processing this segment |
1330 | pCallbackInfo->pCurrentSegment = NULL; |
1331 | |
1332 | // reset the "queue tail" pointer to indicate an empty queue |
1333 | pAsyncInfo->pQueueTail = NULL; |
1334 | } |
1335 | |
1336 | /*--------------------------------------------------------------------------*/ |
1337 | |
1338 | |
1339 | |
1340 | /**************************************************************************** |
1341 | * |
1342 | * SEGMENT ITERATORS |
1343 | * |
1344 | ****************************************************************************/ |
1345 | |
1346 | /* |
1347 | * QuickSegmentIterator |
1348 | * |
1349 | * Returns the next segment to be scanned in a scanning loop. |
1350 | * |
1351 | */ |
1352 | PTR_TableSegment CALLBACK QuickSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *) |
1353 | { |
1354 | LIMITED_METHOD_CONTRACT; |
1355 | |
1356 | PTR_TableSegment pNextSegment; |
1357 | |
1358 | // do we have a previous segment? |
1359 | if (!pPrevSegment) |
1360 | { |
1361 | // nope - start with the first segment in our list |
1362 | pNextSegment = pTable->pSegmentList; |
1363 | } |
1364 | else |
1365 | { |
1366 | // yup, fetch the next segment in the list |
1367 | pNextSegment = pPrevSegment->pNextSegment; |
1368 | } |
1369 | |
1370 | // return the segment pointer |
1371 | return pNextSegment; |
1372 | } |
1373 | |
1374 | |
1375 | /* |
1376 | * StandardSegmentIterator |
1377 | * |
1378 | * Returns the next segment to be scanned in a scanning loop. |
1379 | * |
1380 | * This iterator performs some maintenance on the segments, |
1381 | * primarily making sure the block chains are sorted so that |
1382 | * g0 scans are more likely to operate on contiguous blocks. |
1383 | * |
1384 | */ |
1385 | PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *) |
1386 | { |
1387 | CONTRACTL |
1388 | { |
1389 | WRAPPER(NOTHROW); |
1390 | WRAPPER(GC_TRIGGERS); |
1391 | FORBID_FAULT; |
1392 | SUPPORTS_DAC; |
1393 | } |
1394 | CONTRACTL_END; |
1395 | |
1396 | // get the next segment using the quick iterator |
1397 | PTR_TableSegment pNextSegment = QuickSegmentIterator(pTable, pPrevSegment); |
1398 | |
1399 | #ifndef DACCESS_COMPILE |
    // re-sort the block chains if necessary
1401 | if (pNextSegment && pNextSegment->fResortChains) |
1402 | SegmentResortChains(pNextSegment); |
1403 | #endif |
1404 | |
1405 | // return the segment we found |
1406 | return pNextSegment; |
1407 | } |
1408 | |
1409 | |
1410 | /* |
1411 | * FullSegmentIterator |
1412 | * |
1413 | * Returns the next segment to be scanned in a scanning loop. |
1414 | * |
1415 | * This iterator performs full maintenance on the segments, |
1416 | * including freeing those it notices are empty along the way. |
1417 | * |
1418 | */ |
1419 | PTR_TableSegment CALLBACK FullSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *) |
1420 | { |
1421 | CONTRACTL |
1422 | { |
1423 | WRAPPER(THROWS); |
1424 | WRAPPER(GC_TRIGGERS); |
1425 | FORBID_FAULT; |
1426 | SUPPORTS_DAC; |
1427 | } |
1428 | CONTRACTL_END; |
1429 | |
1430 | // we will be resetting the next segment's sequence number |
1431 | uint32_t uSequence = 0; |
1432 | |
1433 | // if we have a previous segment then compute the next sequence number from it |
1434 | if (pPrevSegment) |
1435 | uSequence = (uint32_t)pPrevSegment->bSequence + 1; |
1436 | |
1437 | // loop until we find an appropriate segment to return |
1438 | PTR_TableSegment pNextSegment; |
1439 | for (;;) |
1440 | { |
1441 | // first, call the standard iterator to get the next segment |
1442 | pNextSegment = StandardSegmentIterator(pTable, pPrevSegment); |
1443 | |
1444 | // if there are no more segments then we're done |
1445 | if (!pNextSegment) |
1446 | break; |
1447 | |
1448 | #ifndef DACCESS_COMPILE |
1449 | // check if we should decommit any excess pages in this segment |
1450 | if (DoesSegmentNeedsToTrimExcessPages(pNextSegment)) |
1451 | { |
1452 | CrstHolder ch(&pTable->Lock); |
1453 | SegmentTrimExcessPages(pNextSegment); |
1454 | } |
1455 | #endif |
1456 | |
1457 | // if the segment has handles in it then it will survive and be returned |
1458 | if (pNextSegment->bEmptyLine > 0) |
1459 | { |
1460 | // update this segment's sequence number |
1461 | pNextSegment->bSequence = (uint8_t)(uSequence % 0x100); |
1462 | |
1463 | // break out and return the segment |
1464 | break; |
1465 | } |
1466 | |
1467 | #ifndef DACCESS_COMPILE |
1468 | CrstHolder ch(&pTable->Lock); |
1469 | // this segment is completely empty - can we free it now? |
1470 | if (pNextSegment->bEmptyLine == 0 && TableCanFreeSegmentNow(pTable, pNextSegment)) |
1471 | { |
1472 | // yup, we probably want to free this one |
1473 | PTR_TableSegment pNextNext = pNextSegment->pNextSegment; |
1474 | |
1475 | // was this the first segment in the list? |
1476 | if (!pPrevSegment) |
1477 | { |
1478 | // yes - are there more segments? |
1479 | if (pNextNext) |
1480 | { |
1481 | // yes - unlink the head |
1482 | pTable->pSegmentList = pNextNext; |
1483 | } |
1484 | else |
1485 | { |
1486 | // no - leave this one in the list and enumerate it |
1487 | break; |
1488 | } |
1489 | } |
1490 | else |
1491 | { |
1492 | // no - unlink this segment from the segment list |
1493 | pPrevSegment->pNextSegment = pNextNext; |
1494 | } |
1495 | |
1496 | // free this segment |
1497 | SegmentFree(pNextSegment); |
1498 | } |
1499 | #else |
1500 | // The code above has a side effect we need to preserve: |
1501 | // while neither pNextSegment nor pPrevSegment are modified, their fields |
1502 | // are, which affects the handle table walk. Since TableCanFreeSegmentNow |
1503 | // actually only checks to see if something is asynchronously scanning this |
1504 | // segment (and returns FALSE if something is), we'll assume it always |
1505 | // returns TRUE. (Since we can't free memory in the Dac, it doesn't matter |
1506 | // if there's another async scan going on.) |
1507 | pPrevSegment = pNextSegment; |
1508 | #endif |
1509 | } |
1510 | |
1511 | // return the segment we found |
1512 | return pNextSegment; |
1513 | } |
1514 | |
1515 | /* |
1516 | * xxxAsyncSegmentIterator |
1517 | * |
 * Returns the next segment to be scanned in an asynchronous scanning loop.
1519 | * |
1520 | * This iterator wraps another iterator, checking for queued blocks from the |
1521 | * previous segment before advancing to the next. If there are queued blocks, |
1522 | * the function processes them by calling xxxTableScanQueuedBlocksAsync. |
1523 | * |
1524 | * N.B. THIS FUNCTION LEAVES THE TABLE LOCK WHILE SCANNING. |
1525 | * |
1526 | */ |
1527 | PTR_TableSegment CALLBACK xxxAsyncSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder) |
1528 | { |
1529 | WRAPPER_NO_CONTRACT; |
1530 | |
1531 | // fetch our table's async scanning info |
1532 | AsyncScanInfo *pAsyncInfo = pTable->pAsyncScanInfo; |
1533 | |
1534 | // sanity |
1535 | _ASSERTE(pAsyncInfo); |
1536 | |
1537 | // if we have queued some blocks from the previous segment then scan them now |
1538 | if (pAsyncInfo->pQueueTail) |
1539 | xxxTableScanQueuedBlocksAsync(pTable, pPrevSegment, pCrstHolder); |
1540 | |
1541 | // fetch the underlying iterator from our async info |
1542 | SEGMENTITERATOR pfnCoreIterator = pAsyncInfo->pfnSegmentIterator; |
1543 | |
1544 | // call the underlying iterator to get the next segment |
1545 | return pfnCoreIterator(pTable, pPrevSegment, pCrstHolder); |
1546 | } |
1547 | |
1548 | /*--------------------------------------------------------------------------*/ |
1549 | |
1550 | |
1551 | |
1552 | /**************************************************************************** |
1553 | * |
1554 | * CORE SCANNING LOGIC |
1555 | * |
1556 | ****************************************************************************/ |
1557 | |
1558 | /* |
1559 | * SegmentScanByTypeChain |
1560 | * |
1561 | * Implements the single-type block scanning loop for a single segment. |
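 *
 * Blocks of a given type are linked into a circular chain through rgAllocation:
 * rgTail[uType] holds the chain's tail block (or BLOCK_INVALID if the type has no
 * blocks), and rgAllocation[rgTail[uType]] is the head. Runs of consecutive block
 * indices are batched into a single callback, so a chain such as
 * 5 -> 6 -> 7 -> 12 -> 13 -> (back to 5) produces the calls (uBlock=5, uCount=3)
 * and (uBlock=12, uCount=2).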
1562 | * |
1563 | */ |
1564 | void SegmentScanByTypeChain(PTR_TableSegment pSegment, uint32_t uType, BLOCKSCANPROC pfnBlockHandler, ScanCallbackInfo *pInfo) |
1565 | { |
1566 | WRAPPER_NO_CONTRACT; |
1567 | |
1568 | // hope we are enumerating a valid type chain :) |
1569 | _ASSERTE(uType < HANDLE_MAX_INTERNAL_TYPES); |
1570 | |
1571 | // fetch the tail |
1572 | uint32_t uBlock = pSegment->rgTail[uType]; |
1573 | |
    // if we didn't find a terminator then there are blocks to enumerate
1575 | if (uBlock != BLOCK_INVALID) |
1576 | { |
1577 | // start walking from the head |
1578 | uBlock = pSegment->rgAllocation[uBlock]; |
1579 | |
1580 | // scan until we loop back to the first block |
1581 | uint32_t uHead = uBlock; |
1582 | do |
1583 | { |
1584 | // search forward trying to batch up sequential runs of blocks |
1585 | uint32_t uLast, uNext = uBlock; |
1586 | do |
1587 | { |
1588 | // compute the next sequential block for comparison |
1589 | uLast = uNext + 1; |
1590 | |
1591 | // fetch the next block in the allocation chain |
1592 | uNext = pSegment->rgAllocation[uNext]; |
1593 | |
1594 | } while ((uNext == uLast) && (uNext != uHead)); |
1595 | |
            // call the callback for this group of blocks
1597 | pfnBlockHandler(pSegment, uBlock, (uLast - uBlock), pInfo); |
1598 | |
1599 | // advance to the next block |
1600 | uBlock = uNext; |
1601 | |
1602 | } while (uBlock != uHead); |
1603 | } |
1604 | } |
1605 | |
1606 | |
1607 | /* |
1608 | * SegmentScanByTypeMap |
1609 | * |
1610 | * Implements the multi-type block scanning loop for a single segment. |
1611 | * |
1612 | */ |
1613 | void SegmentScanByTypeMap(PTR_TableSegment pSegment, const BOOL *rgTypeInclusion, |
1614 | BLOCKSCANPROC pfnBlockHandler, ScanCallbackInfo *pInfo) |
1615 | { |
1616 | WRAPPER_NO_CONTRACT; |
1617 | |
1618 | // start scanning with the first block in the segment |
1619 | uint32_t uBlock = 0; |
1620 | |
1621 | // we don't need to scan the whole segment, just up to the empty line |
1622 | uint32_t uLimit = pSegment->bEmptyLine; |
1623 | |
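    // for illustration with a hypothetical inclusion pattern: if blocks 0 through 4
    // are excluded, included, included, excluded, included (and the empty line is 5),
    // the block handler is called twice: once for blocks 1-2 (count 2) and once for
    // block 4 (count 1)
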
1624 | // loop across the segment looking for blocks to scan |
1625 | for (;;) |
1626 | { |
1627 | // find the first block included by the type map |
1628 | for (;;) |
1629 | { |
1630 | // if we are out of range looking for a start point then we're done |
1631 | if (uBlock >= uLimit) |
1632 | return; |
1633 | |
1634 | // if the type is one we are scanning then we found a start point |
1635 | if (IsBlockIncluded(pSegment, uBlock, rgTypeInclusion)) |
1636 | break; |
1637 | |
1638 | // keep searching with the next block |
1639 | uBlock++; |
1640 | } |
1641 | |
1642 | // remember this block as the first that needs scanning |
1643 | uint32_t uFirst = uBlock; |
1644 | |
1645 | // find the next block not included in the type map |
1646 | for (;;) |
1647 | { |
1648 | // advance the block index |
1649 | uBlock++; |
1650 | |
1651 | // if we are beyond the limit then we are done |
1652 | if (uBlock >= uLimit) |
1653 | break; |
1654 | |
1655 | // if the type is not one we are scanning then we found an end point |
1656 | if (!IsBlockIncluded(pSegment, uBlock, rgTypeInclusion)) |
1657 | break; |
1658 | } |
1659 | |
        // call the callback for the group of blocks we found
1661 | pfnBlockHandler(pSegment, uFirst, (uBlock - uFirst), pInfo); |
1662 | |
1663 | // look for another range starting with the next block |
1664 | uBlock++; |
1665 | } |
1666 | } |
1667 | |
1668 | |
1669 | /* |
1670 | * TableScanHandles |
1671 | * |
1672 | * Implements the core handle scanning loop for a table. |
1673 | * |
1674 | */ |
1675 | void CALLBACK TableScanHandles(PTR_HandleTable pTable, |
1676 | const uint32_t *puType, |
1677 | uint32_t uTypeCount, |
1678 | SEGMENTITERATOR pfnSegmentIterator, |
1679 | BLOCKSCANPROC pfnBlockHandler, |
1680 | ScanCallbackInfo *pInfo, |
1681 | CrstHolderWithState *pCrstHolder) |
1682 | { |
1683 | WRAPPER_NO_CONTRACT; |
1684 | |
1685 | // sanity - caller must ALWAYS provide a valid ScanCallbackInfo |
1686 | _ASSERTE(pInfo); |
1687 | |
1688 | // we may need a type inclusion map for multi-type scans |
1689 | BOOL rgTypeInclusion[INCLUSION_MAP_SIZE]; |
1690 | |
1691 | // we only need to scan types if we have a type array and a callback to call |
1692 | if (!pfnBlockHandler || !puType) |
1693 | uTypeCount = 0; |
1694 | |
1695 | // if we will be scanning more than one type then initialize the inclusion map |
1696 | if (uTypeCount > 1) |
1697 | BuildInclusionMap(rgTypeInclusion, puType, uTypeCount); |
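    // (the inclusion map is presumably a simple per-type lookup; IsBlockIncluded
    //  consults it for each block during the multi-type scan below)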
1698 | |
1699 | // now, iterate over the segments, scanning blocks of the specified type(s) |
1700 | PTR_TableSegment pSegment = NULL; |
1701 | while ((pSegment = pfnSegmentIterator(pTable, pSegment, pCrstHolder)) != NULL) |
1702 | { |
1703 | // if there are types to scan then enumerate the blocks in this segment |
1704 | // (we do this test inside the loop since the iterators should still run...) |
1705 | if (uTypeCount >= 1) |
1706 | { |
1707 | // make sure the "current segment" pointer in the scan info is up to date |
1708 | pInfo->pCurrentSegment = pSegment; |
1709 | |
1710 | // is this a single type or multi-type enumeration? |
1711 | if (uTypeCount == 1) |
1712 | { |
1713 | // single type enumeration - walk the type's allocation chain |
1714 | SegmentScanByTypeChain(pSegment, *puType, pfnBlockHandler, pInfo); |
1715 | } |
1716 | else |
1717 | { |
1718 | // multi-type enumeration - walk the type map to find eligible blocks |
1719 | SegmentScanByTypeMap(pSegment, rgTypeInclusion, pfnBlockHandler, pInfo); |
1720 | } |
1721 | |
1722 | // make sure the "current segment" pointer in the scan info is up to date |
1723 | pInfo->pCurrentSegment = NULL; |
1724 | } |
1725 | } |
1726 | } |
1727 | |
1728 | |
1729 | /* |
1730 | * xxxTableScanHandlesAsync |
1731 | * |
1732 | * Implements asynchronous handle scanning for a table. |
1733 | * |
1734 | * N.B. THIS FUNCTION LEAVES THE TABLE LOCK WHILE SCANNING. |
1735 | * |
1736 | */ |
1737 | void CALLBACK xxxTableScanHandlesAsync(PTR_HandleTable pTable, |
1738 | const uint32_t *puType, |
1739 | uint32_t uTypeCount, |
1740 | SEGMENTITERATOR pfnSegmentIterator, |
1741 | BLOCKSCANPROC pfnBlockHandler, |
1742 | ScanCallbackInfo *pInfo, |
1743 | CrstHolderWithState *pCrstHolder) |
1744 | { |
1745 | WRAPPER_NO_CONTRACT; |
1746 | |
1747 | // presently only one async scan is allowed at a time |
1748 | if (pTable->pAsyncScanInfo) |
1749 | { |
1750 | // somebody tried to kick off multiple async scans |
1751 | _ASSERTE(FALSE); |
1752 | return; |
1753 | } |
1754 | |
1755 | |
1756 | //------------------------------------------------------------------------------- |
1757 | // PRE-SCAN PREPARATION |
1758 | |
1759 | // we keep an initial scan list node on the stack (for perf) |
1760 | ScanQNode initialNode; |
1761 | |
1762 | initialNode.pNext = NULL; |
1763 | initialNode.uEntries = 0; |
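    // (if the scan needs more queue nodes than this one, they are chained off pNext
    //  and freed in the post-scan cleanup below)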
1764 | |
1765 | // initialize our async scanning info |
1766 | AsyncScanInfo asyncInfo; |
1767 | |
1768 | asyncInfo.pCallbackInfo = pInfo; |
1769 | asyncInfo.pfnSegmentIterator = pfnSegmentIterator; |
1770 | asyncInfo.pfnBlockHandler = pfnBlockHandler; |
1771 | asyncInfo.pScanQueue = &initialNode; |
1772 | asyncInfo.pQueueTail = NULL; |
1773 | |
1774 | // link our async scan info into the table |
1775 | pTable->pAsyncScanInfo = &asyncInfo; |
1776 | |
1777 | |
1778 | //------------------------------------------------------------------------------- |
1779 | // PER-SEGMENT ASYNCHRONOUS SCANNING OF BLOCKS |
1780 | // |
1781 | |
1782 | // call the synchronous scanner with our async callbacks |
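    // (BlockQueueBlocksForAsyncScan queues the blocks to be scanned rather than
    //  scanning them in place; xxxAsyncSegmentIterator then processes the queued
    //  blocks, via xxxTableScanQueuedBlocksAsync, before advancing to each next
    //  segment, leaving the table lock while it does so)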
1783 | TableScanHandles(pTable, |
1784 | puType, uTypeCount, |
1785 | xxxAsyncSegmentIterator, |
1786 | BlockQueueBlocksForAsyncScan, |
1787 | pInfo, |
1788 | pCrstHolder); |
1789 | |
1790 | |
1791 | //------------------------------------------------------------------------------- |
1792 | // POST-SCAN CLEANUP |
1793 | // |
1794 | |
1795 | // if we dynamically allocated more nodes then free them now |
1796 | if (initialNode.pNext) |
1797 | { |
1798 | // adjust the head to point to the first dynamically allocated block |
1799 | asyncInfo.pScanQueue = initialNode.pNext; |
1800 | |
1801 | // loop through and free all the queue nodes |
1802 | ProcessScanQueue(&asyncInfo, FreeScanQNode, (uintptr_t)NULL, TRUE); |
1803 | } |
1804 | |
1805 | // unlink our async scanning info from the table |
1806 | pTable->pAsyncScanInfo = NULL; |
1807 | } |
1808 | |
1809 | #ifdef DACCESS_COMPILE |
// TableSegment is variable size: the data up to "rgValue" is statically sized,
// and an additional TableSegment::bCommitLine * HANDLE_BYTES_PER_BLOCK bytes are committed beyond it.
1812 | // See SegmentInitialize in HandleTableCore.cpp. |
1813 | uint32_t TableSegment::DacSize(TADDR addr) |
1814 | { |
1815 | WRAPPER_NO_CONTRACT; |
1816 | |
1817 | uint8_t commitLine = 0; |
1818 | DacReadAll(addr + offsetof(TableSegment, bCommitLine), &commitLine, sizeof(commitLine), true); |
1819 | |
1820 | return offsetof(TableSegment, rgValue) + (uint32_t)commitLine * HANDLE_BYTES_PER_BLOCK; |
1821 | } |
1822 | #endif |
1823 | /*--------------------------------------------------------------------------*/ |
1824 | |
1825 | |