1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | /* |
6 | * Generational GC handle manager. Internal Implementation Header. |
7 | * |
8 | * Shared defines and declarations for handle table implementation. |
9 | * |
10 | |
11 | * |
12 | */ |
13 | |
14 | #include "common.h" |
15 | |
16 | #include "handletable.h" |
17 | |
18 | /*--------------------------------------------------------------------------*/ |
19 | |
20 | //<TODO>@TODO: find a home for this in a project-level header file</TODO> |
21 | #define BITS_PER_BYTE (8) |
22 | /*--------------------------------------------------------------------------*/ |
23 | |
24 | |
25 | |
26 | /**************************************************************************** |
27 | * |
28 | * MAJOR TABLE DEFINITIONS THAT CHANGE DEPENDING ON THE WEATHER |
29 | * |
30 | ****************************************************************************/ |
31 | |
32 | // 64k reserved per segment with 4k as header. |
33 | #define HANDLE_SEGMENT_SIZE (0x10000) // MUST be a power of 2 (and currently must be 64K due to VirtualAlloc semantics) |
34 | #define HANDLE_HEADER_SIZE (0x1000) // SHOULD be <= OS page size |
35 | |
36 | #define HANDLE_SEGMENT_ALIGNMENT HANDLE_SEGMENT_SIZE |
37 | |
38 | |
39 | #if !BIGENDIAN |
40 | |
41 | // little-endian write barrier mask manipulation |
42 | #define GEN_CLUMP_0_MASK (0x000000FF) |
43 | #define NEXT_CLUMP_IN_MASK(dw) (dw >> BITS_PER_BYTE) |
44 | |
45 | #else |
46 | |
47 | // big-endian write barrier mask manipulation |
48 | #define GEN_CLUMP_0_MASK (0xFF000000) |
49 | #define NEXT_CLUMP_IN_MASK(dw) (dw << BITS_PER_BYTE) |
50 | |
51 | #endif |
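
/*
 * Illustrative sketch (not part of the implementation; 'pSegment' and
 * 'uBlock' stand for a segment pointer and block index as used later in
 * this header): the macros above let barrier code walk the four clump
 * generations packed in a uint32_t without caring about byte order.
 *
 *     uint32_t dwGen = ((uint32_t *)pSegment->rgGeneration)[uBlock];
 *     if (dwGen & GEN_CLUMP_0_MASK)
 *     {
 *         // clump 0 of this block has a non-zero generation
 *     }
 *     dwGen = NEXT_CLUMP_IN_MASK(dwGen); // clump 1 now sits in the clump-0 slot
 */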
52 | |
53 | |
// if the above numbers change, then these will likely change as well
55 | #define HANDLE_HANDLES_PER_CLUMP (16) // segment write-barrier granularity |
56 | #define HANDLE_HANDLES_PER_BLOCK (64) // segment suballocation granularity |
57 | #define HANDLE_OPTIMIZE_FOR_64_HANDLE_BLOCKS // flag for certain optimizations |
58 | |
59 | // number of types allowed for public callers |
60 | #define HANDLE_MAX_PUBLIC_TYPES (HANDLE_MAX_INTERNAL_TYPES - 1) // reserve one internal type |
61 | |
62 | // internal block types |
63 | #define HNDTYPE_INTERNAL_DATABLOCK (HANDLE_MAX_INTERNAL_TYPES - 1) // reserve last type for data blocks |
64 | |
65 | // max number of generations to support statistics on |
66 | #define MAXSTATGEN (5) |
67 | |
68 | /*--------------------------------------------------------------------------*/ |
69 | |
70 | |
71 | |
72 | /**************************************************************************** |
73 | * |
74 | * MORE DEFINITIONS |
75 | * |
76 | ****************************************************************************/ |
77 | |
78 | // fast handle-to-segment mapping |
79 | #define HANDLE_SEGMENT_CONTENT_MASK (HANDLE_SEGMENT_SIZE - 1) |
80 | #define HANDLE_SEGMENT_ALIGN_MASK (~HANDLE_SEGMENT_CONTENT_MASK) |
81 | |
82 | // table layout metrics |
83 | #define HANDLE_SIZE sizeof(_UNCHECKED_OBJECTREF) |
84 | #define HANDLE_HANDLES_PER_SEGMENT ((HANDLE_SEGMENT_SIZE - HANDLE_HEADER_SIZE) / HANDLE_SIZE) |
85 | #define HANDLE_BLOCKS_PER_SEGMENT (HANDLE_HANDLES_PER_SEGMENT / HANDLE_HANDLES_PER_BLOCK) |
86 | #define HANDLE_CLUMPS_PER_SEGMENT (HANDLE_HANDLES_PER_SEGMENT / HANDLE_HANDLES_PER_CLUMP) |
87 | #define HANDLE_CLUMPS_PER_BLOCK (HANDLE_HANDLES_PER_BLOCK / HANDLE_HANDLES_PER_CLUMP) |
88 | #define HANDLE_BYTES_PER_BLOCK (HANDLE_HANDLES_PER_BLOCK * HANDLE_SIZE) |
89 | #define HANDLE_HANDLES_PER_MASK (sizeof(uint32_t) * BITS_PER_BYTE) |
90 | #define HANDLE_MASKS_PER_SEGMENT (HANDLE_HANDLES_PER_SEGMENT / HANDLE_HANDLES_PER_MASK) |
91 | #define HANDLE_MASKS_PER_BLOCK (HANDLE_HANDLES_PER_BLOCK / HANDLE_HANDLES_PER_MASK) |
92 | #define HANDLE_CLUMPS_PER_MASK (HANDLE_HANDLES_PER_MASK / HANDLE_HANDLES_PER_CLUMP) |
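
/*
 * For concreteness (assuming a 64-bit build where HANDLE_SIZE is 8 bytes),
 * the metrics above work out to:
 *
 *     HANDLE_HANDLES_PER_SEGMENT   (0x10000 - 0x1000) / 8   = 7680
 *     HANDLE_BLOCKS_PER_SEGMENT    7680 / 64                = 120
 *     HANDLE_CLUMPS_PER_SEGMENT    7680 / 16                = 480
 *     HANDLE_CLUMPS_PER_BLOCK      64 / 16                  = 4
 *     HANDLE_BYTES_PER_BLOCK       64 * 8                   = 512
 *     HANDLE_HANDLES_PER_MASK      sizeof(uint32_t) * 8     = 32
 *     HANDLE_MASKS_PER_SEGMENT     7680 / 32                = 240
 *     HANDLE_MASKS_PER_BLOCK       64 / 32                  = 2
 *     HANDLE_CLUMPS_PER_MASK       32 / 16                  = 2
 */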
93 | |
// We rely on this relation (exactly two free masks per block) when checking a block's free masks.
95 | C_ASSERT (HANDLE_HANDLES_PER_MASK * 2 == HANDLE_HANDLES_PER_BLOCK); |
96 | |
97 | |
98 | // cache layout metrics |
99 | #define HANDLE_CACHE_TYPE_SIZE 128 // 128 == 63 handles per bank |
100 | #define HANDLES_PER_CACHE_BANK ((HANDLE_CACHE_TYPE_SIZE / 2) - 1) |
101 | |
102 | // cache policy defines |
103 | #define REBALANCE_TOLERANCE (HANDLES_PER_CACHE_BANK / 3) |
104 | #define REBALANCE_LOWATER_MARK (HANDLES_PER_CACHE_BANK - REBALANCE_TOLERANCE) |
105 | #define REBALANCE_HIWATER_MARK (HANDLES_PER_CACHE_BANK + REBALANCE_TOLERANCE) |
106 | |
107 | // bulk alloc policy defines |
108 | #define SMALL_ALLOC_COUNT (HANDLES_PER_CACHE_BANK / 10) |
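
/*
 * With HANDLE_CACHE_TYPE_SIZE at 128 (63 handles per bank), the policy above
 * works out to REBALANCE_TOLERANCE = 21, REBALANCE_LOWATER_MARK = 42,
 * REBALANCE_HIWATER_MARK = 84, and SMALL_ALLOC_COUNT = 6.
 */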
109 | |
110 | // misc constants |
111 | #define MASK_FULL (0) |
112 | #define MASK_EMPTY (0xFFFFFFFF) |
113 | #define MASK_LOBYTE (0x000000FF) |
114 | #define TYPE_INVALID ((uint8_t)0xFF) |
115 | #define BLOCK_INVALID ((uint8_t)0xFF) |
116 | |
117 | /*--------------------------------------------------------------------------*/ |
118 | |
119 | |
120 | |
121 | /**************************************************************************** |
122 | * |
123 | * CORE TABLE LAYOUT STRUCTURES |
124 | * |
125 | ****************************************************************************/ |
126 | |
127 | /* |
128 | * we need byte packing for the handle table layout to work |
129 | */ |
130 | #pragma pack(push,1) |
131 | |
132 | |
133 | /* |
134 | * Table Segment Header |
135 | * |
136 | * Defines the layout for a segment's header data. |
137 | */ |
struct _TableSegmentHeader
139 | { |
140 | /* |
141 | * Write Barrier Generation Numbers |
142 | * |
143 | * Each slot holds four bytes. Each byte corresponds to a clump of handles. |
144 | * The value of the byte corresponds to the lowest possible generation that a |
145 | * handle in that clump could point into. |
146 | * |
147 | * WARNING: Although this array is logically organized as a uint8_t[], it is sometimes |
148 | * accessed as uint32_t[] when processing bytes in parallel. Code which treats the |
 * array as an array of uint32_t values must handle big/little endian issues itself.
150 | */ |
uint8_t rgGeneration[HANDLE_BLOCKS_PER_SEGMENT * sizeof(uint32_t) / sizeof(uint8_t)];
152 | |
153 | /* |
154 | * Block Allocation Chains |
155 | * |
156 | * Each slot indexes the next block in an allocation chain. |
157 | */ |
uint8_t rgAllocation[HANDLE_BLOCKS_PER_SEGMENT];
159 | |
160 | /* |
161 | * Block Free Masks |
162 | * |
 * Masks - one bit for every handle in the segment (a set bit means the handle is free; see MASK_EMPTY/MASK_FULL).
164 | */ |
uint32_t rgFreeMask[HANDLE_MASKS_PER_SEGMENT];
166 | |
167 | /* |
168 | * Block Handle Types |
169 | * |
170 | * Each slot holds the handle type of the associated block. |
171 | */ |
uint8_t rgBlockType[HANDLE_BLOCKS_PER_SEGMENT];
173 | |
174 | /* |
175 | * Block User Data Map |
176 | * |
177 | * Each slot holds the index of a user data block (if any) for the associated block. |
178 | */ |
uint8_t rgUserData[HANDLE_BLOCKS_PER_SEGMENT];
180 | |
181 | /* |
182 | * Block Lock Count |
183 | * |
184 | * Each slot holds a lock count for its associated block. |
185 | * Locked blocks are not freed, even when empty. |
186 | */ |
uint8_t rgLocks[HANDLE_BLOCKS_PER_SEGMENT];
188 | |
189 | /* |
190 | * Allocation Chain Tails |
191 | * |
192 | * Each slot holds the tail block index for an allocation chain. |
193 | */ |
uint8_t rgTail[HANDLE_MAX_INTERNAL_TYPES];
195 | |
196 | /* |
197 | * Allocation Chain Hints |
198 | * |
199 | * Each slot holds a hint block index for an allocation chain. |
200 | */ |
uint8_t rgHint[HANDLE_MAX_INTERNAL_TYPES];
202 | |
203 | /* |
204 | * Free Count |
205 | * |
206 | * Each slot holds the number of free handles in an allocation chain. |
207 | */ |
uint32_t rgFreeCount[HANDLE_MAX_INTERNAL_TYPES];
209 | |
210 | /* |
211 | * Next Segment |
212 | * |
213 | * Points to the next segment in the chain (if we ran out of space in this one). |
214 | */ |
215 | #ifdef DACCESS_COMPILE |
216 | TADDR pNextSegment; |
217 | #else |
struct TableSegment *pNextSegment;
219 | #endif // DACCESS_COMPILE |
220 | |
221 | /* |
222 | * Handle Table |
223 | * |
224 | * Points to owning handle table for this table segment. |
225 | */ |
226 | PTR_HandleTable pHandleTable; |
227 | |
228 | /* |
229 | * Flags |
230 | */ |
uint8_t fResortChains    : 1;   // allocation chains need sorting
uint8_t fNeedsScavenging : 1;   // free blocks need scavenging
uint8_t _fUnused         : 6;   // unused
234 | |
235 | /* |
236 | * Free List Head |
237 | * |
238 | * Index of the first free block in the segment. |
239 | */ |
uint8_t bFreeList;
241 | |
242 | /* |
243 | * Empty Line |
244 | * |
245 | * Index of the first KNOWN block of the last group of unused blocks in the segment. |
246 | */ |
uint8_t bEmptyLine;
248 | |
249 | /* |
250 | * Commit Line |
251 | * |
 * Index of the first uncommitted block in the segment.
 */
uint8_t bCommitLine;
255 | |
256 | /* |
257 | * Decommit Line |
258 | * |
259 | * Index of the first block in the highest committed page of the segment. |
260 | */ |
uint8_t bDecommitLine;
262 | |
263 | /* |
264 | * Sequence |
265 | * |
266 | * Indicates the segment sequence number. |
267 | */ |
uint8_t bSequence;
269 | }; |
270 | |
typedef DPTR(struct _TableSegmentHeader) PTR__TableSegmentHeader;
272 | typedef DPTR(uintptr_t) PTR_uintptr_t; |
273 | |
274 | // The handle table is large and may not be entirely mapped. That's one reason for splitting out the table |
275 | // segment and the header as two separate classes. In DAC builds, we generally need only a single element from |
276 | // the table segment, so we can use the DAC to retrieve just the information we require. |
277 | /* |
278 | * Table Segment |
279 | * |
280 | * Defines the layout for a handle table segment. |
281 | */ |
282 | struct TableSegment : public _TableSegmentHeader |
283 | { |
284 | /* |
285 | * Filler |
286 | */ |
287 | uint8_t rgUnused[HANDLE_HEADER_SIZE - sizeof(_TableSegmentHeader)]; |
288 | |
289 | /* |
290 | * Handles |
291 | */ |
292 | _UNCHECKED_OBJECTREF rgValue[HANDLE_HANDLES_PER_SEGMENT]; |
293 | |
294 | #ifdef DACCESS_COMPILE |
295 | static uint32_t DacSize(TADDR addr); |
296 | #endif |
297 | }; |
298 | |
299 | typedef SPTR(struct TableSegment) PTR_TableSegment; |
300 | |
301 | /* |
302 | * restore default packing |
303 | */ |
304 | #pragma pack(pop) |
305 | |
306 | |
307 | /* |
308 | * Handle Type Cache |
309 | * |
310 | * Defines the layout of a per-type handle cache. |
311 | */ |
312 | struct HandleTypeCache |
313 | { |
314 | /* |
315 | * reserve bank |
316 | */ |
317 | OBJECTHANDLE rgReserveBank[HANDLES_PER_CACHE_BANK]; |
318 | |
319 | /* |
320 | * index of next available handle slot in the reserve bank |
321 | */ |
322 | int32_t lReserveIndex; |
323 | |
324 | |
325 | /*--------------------------------------------------------------------------------- |
326 | * N.B. this structure is split up this way so that when HANDLES_PER_CACHE_BANK is |
327 | * large enough, lReserveIndex and lFreeIndex will reside in different cache lines |
328 | *--------------------------------------------------------------------------------*/ |
329 | |
330 | /* |
331 | * free bank |
332 | */ |
333 | OBJECTHANDLE rgFreeBank[HANDLES_PER_CACHE_BANK]; |
334 | |
335 | /* |
336 | * index of next empty slot in the free bank |
337 | */ |
338 | int32_t lFreeIndex; |
339 | }; |
340 | |
341 | /* |
 * Async pin EE callback context, used to call back to the EE when enumerating
343 | * over async pinned handles. |
344 | */ |
345 | class AsyncPinCallbackContext |
346 | { |
347 | private: |
348 | async_pin_enum_fn m_callback; |
349 | void* m_context; |
350 | |
351 | public: |
352 | /* |
353 | * Constructs a new AsyncPinCallbackContext from a callback and a context, |
354 | * which will be passed to the callback as its second parameter every time |
355 | * it is invoked. |
356 | */ |
357 | AsyncPinCallbackContext(async_pin_enum_fn callback, void* context) |
358 | : m_callback(callback), m_context(context) |
359 | {} |
360 | |
361 | /* |
362 | * Invokes the callback with the given argument, returning the callback's |
 * result.
364 | */ |
365 | bool Invoke(Object* argument) const |
366 | { |
367 | assert(m_callback != nullptr); |
368 | return m_callback(argument, m_context); |
369 | } |
370 | }; |
371 | |
372 | |
373 | /*---------------------------------------------------------------------------*/ |
374 | |
375 | |
376 | |
377 | /**************************************************************************** |
378 | * |
379 | * SCANNING PROTOTYPES |
380 | * |
381 | ****************************************************************************/ |
382 | |
383 | /* |
384 | * ScanCallbackInfo |
385 | * |
386 | * Carries parameters for per-segment and per-block scanning callbacks. |
387 | * |
388 | */ |
389 | struct ScanCallbackInfo |
390 | { |
391 | PTR_TableSegment pCurrentSegment; // segment we are presently scanning, if any |
392 | uint32_t uFlags; // HNDGCF_* flags |
393 | BOOL fEnumUserData; // whether user data is being enumerated as well |
394 | HANDLESCANPROC pfnScan; // per-handle scan callback |
395 | uintptr_t param1; // callback param 1 |
396 | uintptr_t param2; // callback param 2 |
397 | uint32_t dwAgeMask; // generation mask for ephemeral GCs |
398 | |
399 | #ifdef _DEBUG |
400 | uint32_t DEBUG_BlocksScanned; |
401 | uint32_t DEBUG_BlocksScannedNonTrivially; |
402 | uint32_t DEBUG_HandleSlotsScanned; |
403 | uint32_t DEBUG_HandlesActuallyScanned; |
404 | #endif |
405 | }; |
406 | |
407 | |
408 | /* |
409 | * BLOCKSCANPROC |
410 | * |
411 | * Prototype for callbacks that implement per-block scanning logic. |
412 | * |
413 | */ |
414 | typedef void (CALLBACK *BLOCKSCANPROC)(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
415 | |
416 | |
417 | /* |
418 | * SEGMENTITERATOR |
419 | * |
420 | * Prototype for callbacks that implement per-segment scanning logic. |
421 | * |
422 | */ |
423 | typedef PTR_TableSegment (CALLBACK *SEGMENTITERATOR)(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder); |
424 | |
425 | |
426 | /* |
427 | * TABLESCANPROC |
428 | * |
429 | * Prototype for TableScanHandles and xxxTableScanHandlesAsync. |
430 | * |
431 | */ |
432 | typedef void (CALLBACK *TABLESCANPROC)(PTR_HandleTable pTable, |
433 | const uint32_t *puType, uint32_t uTypeCount, |
434 | SEGMENTITERATOR pfnSegmentIterator, |
435 | BLOCKSCANPROC pfnBlockHandler, |
436 | ScanCallbackInfo *pInfo, |
437 | CrstHolderWithState *pCrstHolder); |
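
/*
 * Illustrative sketch (hypothetical callback 'MyHandleScanProc', assuming
 * the HANDLESCANPROC shape declared in handletable.h): a synchronous scan of
 * a single handle type is typically wired together from the building blocks
 * declared in this header, roughly like so:
 *
 *     void CALLBACK MyHandleScanProc(PTR_UNCHECKED_OBJECTREF pref,
 *                                    uintptr_t *pExtraInfo,
 *                                    uintptr_t param1, uintptr_t param2);
 *
 *     uint32_t uType = ...;             // some public handle type
 *     ScanCallbackInfo info = {};
 *     info.uFlags  = HNDGCF_NORMAL;     // flag value from handletable.h
 *     info.pfnScan = MyHandleScanProc;
 *     TableScanHandles(pTable, &uType, 1,
 *                      QuickSegmentIterator,
 *                      BlockScanBlocksWithoutUserData,
 *                      &info, NULL);
 */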
438 | |
439 | /*--------------------------------------------------------------------------*/ |
440 | |
441 | |
442 | |
443 | /**************************************************************************** |
444 | * |
445 | * ADDITIONAL TABLE STRUCTURES |
446 | * |
447 | ****************************************************************************/ |
448 | |
449 | /* |
450 | * AsyncScanInfo |
451 | * |
452 | * Tracks the state of an async scan for a handle table. |
453 | * |
454 | */ |
455 | struct AsyncScanInfo |
456 | { |
457 | /* |
458 | * Underlying Callback Info |
459 | * |
460 | * Specifies callback info for the underlying block handler. |
461 | */ |
462 | struct ScanCallbackInfo *pCallbackInfo; |
463 | |
464 | /* |
465 | * Underlying Segment Iterator |
466 | * |
467 | * Specifies the segment iterator to be used during async scanning. |
468 | */ |
469 | SEGMENTITERATOR pfnSegmentIterator; |
470 | |
471 | /* |
472 | * Underlying Block Handler |
473 | * |
474 | * Specifies the block handler to be used during async scanning. |
475 | */ |
476 | BLOCKSCANPROC pfnBlockHandler; |
477 | |
478 | /* |
479 | * Scan Queue |
480 | * |
481 | * Specifies the nodes to be processed asynchronously. |
482 | */ |
483 | struct ScanQNode *pScanQueue; |
484 | |
485 | /* |
486 | * Queue Tail |
487 | * |
488 | * Specifies the tail node in the queue, or NULL if the queue is empty. |
489 | */ |
490 | struct ScanQNode *pQueueTail; |
491 | }; |
492 | |
493 | |
494 | /* |
495 | * Handle Table |
496 | * |
497 | * Defines the layout of a handle table object. |
498 | */ |
499 | #ifdef _MSC_VER |
500 | #pragma warning(push) |
501 | #pragma warning(disable : 4200 ) // zero-sized array |
502 | #endif |
503 | struct HandleTable |
504 | { |
505 | /* |
506 | * flags describing handle attributes |
507 | * |
508 | * N.B. this is at offset 0 due to frequent access by cache free codepath |
509 | */ |
510 | uint32_t rgTypeFlags[HANDLE_MAX_INTERNAL_TYPES]; |
511 | |
512 | /* |
513 | * per-table AppDomain info |
514 | */ |
515 | ADIndex uADIndex; |
516 | |
517 | /* |
518 | * lock for this table |
519 | */ |
520 | CrstStatic Lock; |
521 | |
522 | /* |
523 | * number of types this table supports |
524 | */ |
525 | uint32_t uTypeCount; |
526 | |
527 | /* |
528 | * number of handles owned by this table that are marked as "used" |
529 | * (this includes the handles residing in rgMainCache and rgQuickCache) |
530 | */ |
531 | uint32_t dwCount; |
532 | |
533 | /* |
534 | * head of segment list for this table |
535 | */ |
536 | PTR_TableSegment pSegmentList; |
537 | |
538 | /* |
539 | * information on current async scan (if any) |
540 | */ |
541 | AsyncScanInfo *pAsyncScanInfo; |
542 | |
543 | /* |
544 | * per-table user info |
545 | */ |
546 | uint32_t uTableIndex; |
547 | |
548 | /* |
549 | * one-level per-type 'quick' handle cache |
550 | */ |
551 | OBJECTHANDLE rgQuickCache[HANDLE_MAX_INTERNAL_TYPES]; // interlocked ops used here |
552 | |
553 | /* |
554 | * debug-only statistics |
555 | */ |
556 | #ifdef _DEBUG |
557 | int _DEBUG_iMaxGen; |
558 | int64_t _DEBUG_TotalBlocksScanned [MAXSTATGEN]; |
559 | int64_t _DEBUG_TotalBlocksScannedNonTrivially[MAXSTATGEN]; |
560 | int64_t _DEBUG_TotalHandleSlotsScanned [MAXSTATGEN]; |
561 | int64_t _DEBUG_TotalHandlesActuallyScanned [MAXSTATGEN]; |
562 | #endif |
563 | |
564 | /* |
565 | * primary per-type handle cache |
566 | */ |
567 | HandleTypeCache rgMainCache[0]; // interlocked ops used here |
568 | }; |
569 | |
570 | #ifdef _MSC_VER |
571 | #pragma warning(pop) |
572 | #endif |
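
/*
 * Illustrative sketch (simplified; 'cbTable' is a hypothetical local): the
 * zero-sized rgMainCache array means a table supporting uTypeCount types is
 * carved from a single allocation sized for the header plus one
 * HandleTypeCache per type.
 *
 *     size_t cbTable = sizeof(HandleTable)
 *                    + uTypeCount * sizeof(HandleTypeCache);
 */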
573 | |
574 | /*--------------------------------------------------------------------------*/ |
575 | |
576 | |
577 | |
578 | /**************************************************************************** |
579 | * |
580 | * HELPERS |
581 | * |
582 | ****************************************************************************/ |
583 | |
584 | /* |
585 | * A 32/64 comparison callback |
586 | *<TODO> |
587 | * @TODO: move/merge into common util file |
588 | *</TODO> |
589 | */ |
590 | typedef int (*PFNCOMPARE)(uintptr_t p, uintptr_t q); |
591 | |
592 | |
593 | /* |
594 | * A 32/64 neutral quicksort |
595 | *<TODO> |
596 | * @TODO: move/merge into common util file |
597 | *</TODO> |
598 | */ |
599 | void QuickSort(uintptr_t *pData, int left, int right, PFNCOMPARE pfnCompare); |
600 | |
601 | |
602 | /* |
603 | * CompareHandlesByFreeOrder |
604 | * |
605 | * Returns: |
606 | * <0 - handle P should be freed before handle Q |
 * =0 - handles are equivalent for free order purposes
608 | * >0 - handle Q should be freed before handle P |
609 | * |
610 | */ |
611 | int CompareHandlesByFreeOrder(uintptr_t p, uintptr_t q); |
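
/*
 * Illustrative sketch (assuming 'rgHandles'/'uCount' describe a batch of
 * handles about to be freed): the comparator plugs directly into QuickSort
 * above to put handles into preferred free order.
 *
 *     QuickSort((uintptr_t *)rgHandles, 0, (int)uCount - 1,
 *               CompareHandlesByFreeOrder);
 */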
612 | |
613 | /*--------------------------------------------------------------------------*/ |
614 | |
615 | |
616 | |
617 | /**************************************************************************** |
618 | * |
619 | * CORE TABLE MANAGEMENT |
620 | * |
621 | ****************************************************************************/ |
622 | |
623 | /* |
624 | * TypeHasUserData |
625 | * |
626 | * Determines whether a given handle type has user data. |
627 | * |
628 | */ |
629 | __inline BOOL TypeHasUserData(HandleTable *pTable, uint32_t uType) |
630 | { |
631 | LIMITED_METHOD_CONTRACT; |
632 | |
633 | // sanity |
634 | _ASSERTE(uType < HANDLE_MAX_INTERNAL_TYPES); |
635 | |
636 | // consult the type flags |
637 | return (pTable->rgTypeFlags[uType] & HNDF_EXTRAINFO); |
638 | } |
639 | |
640 | |
641 | /* |
642 | * TableCanFreeSegmentNow |
643 | * |
644 | * Determines if it is OK to free the specified segment at this time. |
645 | * |
646 | */ |
647 | BOOL TableCanFreeSegmentNow(HandleTable *pTable, TableSegment *pSegment); |
648 | |
649 | |
650 | /* |
651 | * BlockIsLocked |
652 | * |
653 | * Determines if the lock count for the specified block is currently non-zero. |
654 | * |
655 | */ |
656 | __inline BOOL BlockIsLocked(TableSegment *pSegment, uint32_t uBlock) |
657 | { |
658 | LIMITED_METHOD_CONTRACT; |
659 | |
660 | // sanity |
661 | _ASSERTE(uBlock < HANDLE_BLOCKS_PER_SEGMENT); |
662 | |
663 | // fetch the lock count and compare it to zero |
664 | return (pSegment->rgLocks[uBlock] != 0); |
665 | } |
666 | |
667 | |
668 | /* |
669 | * BlockLock |
670 | * |
671 | * Increases the lock count for a block. |
672 | * |
673 | */ |
674 | __inline void BlockLock(TableSegment *pSegment, uint32_t uBlock) |
675 | { |
676 | LIMITED_METHOD_CONTRACT; |
677 | |
678 | // fetch the old lock count |
679 | uint8_t bLocks = pSegment->rgLocks[uBlock]; |
680 | |
681 | // assert if we are about to trash the count |
682 | _ASSERTE(bLocks < 0xFF); |
683 | |
684 | // store the incremented lock count |
685 | pSegment->rgLocks[uBlock] = bLocks + 1; |
686 | } |
687 | |
688 | |
689 | /* |
690 | * BlockUnlock |
691 | * |
692 | * Decreases the lock count for a block. |
693 | * |
694 | */ |
695 | __inline void BlockUnlock(TableSegment *pSegment, uint32_t uBlock) |
696 | { |
697 | LIMITED_METHOD_CONTRACT; |
698 | |
699 | // fetch the old lock count |
700 | uint8_t bLocks = pSegment->rgLocks[uBlock]; |
701 | |
702 | // assert if we are about to trash the count |
703 | _ASSERTE(bLocks > 0); |
704 | |
705 | // store the decremented lock count |
706 | pSegment->rgLocks[uBlock] = bLocks - 1; |
707 | } |
708 | |
709 | |
710 | /* |
711 | * BlockFetchUserDataPointer |
712 | * |
713 | * Gets the user data pointer for the first handle in a block. |
714 | * |
715 | */ |
PTR_uintptr_t BlockFetchUserDataPointer(PTR__TableSegmentHeader pSegment, uint32_t uBlock, BOOL fAssertOnError);
717 | |
718 | |
719 | /* |
720 | * HandleValidateAndFetchUserDataPointer |
721 | * |
722 | * Gets the user data pointer for a handle. |
723 | * ASSERTs and returns NULL if handle is not of the expected type. |
724 | * |
725 | */ |
726 | uintptr_t *HandleValidateAndFetchUserDataPointer(OBJECTHANDLE handle, uint32_t uTypeExpected); |
727 | |
728 | |
729 | /* |
730 | * HandleQuickFetchUserDataPointer |
731 | * |
732 | * Gets the user data pointer for a handle. |
733 | * Less validation is performed. |
734 | * |
735 | */ |
736 | PTR_uintptr_t HandleQuickFetchUserDataPointer(OBJECTHANDLE handle); |
737 | |
738 | |
739 | /* |
740 | * HandleQuickSetUserData |
741 | * |
742 | * Stores user data with a handle. |
743 | * Less validation is performed. |
744 | * |
745 | */ |
746 | void HandleQuickSetUserData(OBJECTHANDLE handle, uintptr_t lUserData); |
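
/*
 * Illustrative usage sketch (hypothetical value 'lMyData'): for a handle
 * whose type carries the HNDF_EXTRAINFO flag, user data can be stored and
 * read back via the quick accessors above.
 *
 *     HandleQuickSetUserData(handle, lMyData);
 *     PTR_uintptr_t pUserData = HandleQuickFetchUserDataPointer(handle);
 *     // *pUserData == lMyData
 */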
747 | |
748 | |
749 | /* |
750 | * HandleFetchType |
751 | * |
752 | * Computes the type index for a given handle. |
753 | * |
754 | */ |
755 | uint32_t HandleFetchType(OBJECTHANDLE handle); |
756 | |
757 | |
758 | /* |
759 | * HandleFetchHandleTable |
760 | * |
761 | * Returns the containing handle table of a given handle. |
762 | * |
763 | */ |
764 | PTR_HandleTable HandleFetchHandleTable(OBJECTHANDLE handle); |
765 | |
766 | |
767 | /* |
768 | * SegmentAlloc |
769 | * |
770 | * Allocates a new segment. |
771 | * |
772 | */ |
773 | TableSegment *SegmentAlloc(HandleTable *pTable); |
774 | |
775 | |
776 | /* |
777 | * SegmentFree |
778 | * |
779 | * Frees the specified segment. |
780 | * |
781 | */ |
782 | void SegmentFree(TableSegment *pSegment); |
783 | |
784 | /* |
785 | * TableHandleAsyncPinHandles |
786 | * |
 * Marks as ready all non-pending OverlappedData that are moved to the default domain.
788 | * |
789 | */ |
790 | BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext& callbackCtx); |
791 | |
792 | /* |
793 | * TableRelocateAsyncPinHandles |
794 | * |
795 | * Replaces async pin handles with ones in default domain. |
796 | * |
797 | */ |
798 | void TableRelocateAsyncPinHandles(HandleTable *pTable, HandleTable *pTargetTable, void (*clearIfComplete)(Object*), void (*setHandle)(Object*, OBJECTHANDLE)); |
799 | |
800 | /* |
801 | * Check if a handle is part of a HandleTable |
802 | */ |
803 | BOOL TableContainHandle(HandleTable *pTable, OBJECTHANDLE handle); |
804 | |
805 | /* |
806 | * SegmentRemoveFreeBlocks |
807 | * |
 * Removes free blocks from a type's block chain in a segment. The blocks are
 * returned to the segment's free list.
810 | * |
811 | */ |
812 | void SegmentRemoveFreeBlocks(TableSegment *pSegment, uint32_t uType); |
813 | |
814 | |
815 | /* |
816 | * SegmentResortChains |
817 | * |
818 | * Sorts the block chains for optimal scanning order. |
819 | * Sorts the free list to combat fragmentation. |
820 | * |
821 | */ |
822 | void SegmentResortChains(TableSegment *pSegment); |
823 | |
824 | |
825 | /* |
826 | * DoesSegmentNeedsToTrimExcessPages |
827 | * |
828 | * Checks to see if any pages can be decommitted from the segment. |
829 | * |
830 | */ |
831 | BOOL DoesSegmentNeedsToTrimExcessPages(TableSegment *pSegment); |
832 | |
833 | /* |
834 | * SegmentTrimExcessPages |
835 | * |
836 | * Checks to see if any pages can be decommitted from the segment. |
 * If there are any unused pages, it decommits them.
838 | * |
839 | */ |
840 | void SegmentTrimExcessPages(TableSegment *pSegment); |
841 | |
842 | |
843 | /* |
844 | * TableAllocBulkHandles |
845 | * |
 * Attempts to allocate the requested number of handles of the specified type.
847 | * |
848 | * Returns the number of handles that were actually allocated. This is always |
849 | * the same as the number of handles requested except in out-of-memory conditions, |
850 | * in which case it is the number of handles that were successfully allocated. |
851 | * |
852 | */ |
853 | uint32_t TableAllocBulkHandles(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount); |
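
/*
 * Illustrative sketch: callers must check the return value, since under
 * out-of-memory conditions fewer handles than requested may come back.
 *
 *     uint32_t uAllocated = TableAllocBulkHandles(pTable, uType, rgHandles, uWant);
 *     if (uAllocated < uWant)
 *     {
 *         // out of memory: only the first uAllocated entries of rgHandles are valid
 *     }
 */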
854 | |
855 | |
856 | /* |
857 | * TableFreeBulkPreparedHandles |
858 | * |
859 | * Frees an array of handles of the specified type. |
860 | * |
861 | * This routine is optimized for a sorted array of handles but will accept any order. |
862 | * |
863 | */ |
864 | void TableFreeBulkPreparedHandles(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount); |
865 | |
866 | |
867 | /* |
868 | * TableFreeBulkUnpreparedHandles |
869 | * |
870 | * Frees an array of handles of the specified type by preparing them and calling TableFreeBulkPreparedHandles. |
871 | * |
872 | */ |
873 | void TableFreeBulkUnpreparedHandles(HandleTable *pTable, uint32_t uType, const OBJECTHANDLE *pHandles, uint32_t uCount); |
874 | |
875 | /*--------------------------------------------------------------------------*/ |
876 | |
877 | |
878 | |
879 | /**************************************************************************** |
880 | * |
881 | * HANDLE CACHE |
882 | * |
883 | ****************************************************************************/ |
884 | |
885 | /* |
886 | * TableAllocSingleHandleFromCache |
887 | * |
888 | * Gets a single handle of the specified type from the handle table by |
889 | * trying to fetch it from the reserve cache for that handle type. If the |
890 | * reserve cache is empty, this routine calls TableCacheMissOnAlloc. |
891 | * |
892 | */ |
893 | OBJECTHANDLE TableAllocSingleHandleFromCache(HandleTable *pTable, uint32_t uType); |
894 | |
895 | |
896 | /* |
897 | * TableFreeSingleHandleToCache |
898 | * |
899 | * Returns a single handle of the specified type to the handle table |
900 | * by trying to store it in the free cache for that handle type. If the |
901 | * free cache is full, this routine calls TableCacheMissOnFree. |
902 | * |
903 | */ |
904 | void TableFreeSingleHandleToCache(HandleTable *pTable, uint32_t uType, OBJECTHANDLE handle); |
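
/*
 * Illustrative usage sketch: the single-handle cache routines form a matched
 * pair around the lifetime of a handle.
 *
 *     OBJECTHANDLE handle = TableAllocSingleHandleFromCache(pTable, uType);
 *     if (handle != NULL)
 *     {
 *         // ... use the handle ...
 *         TableFreeSingleHandleToCache(pTable, uType, handle);
 *     }
 */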
905 | |
906 | |
907 | /* |
908 | * TableAllocHandlesFromCache |
909 | * |
910 | * Allocates multiple handles of the specified type by repeatedly |
911 | * calling TableAllocSingleHandleFromCache. |
912 | * |
913 | */ |
914 | uint32_t TableAllocHandlesFromCache(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount); |
915 | |
916 | |
917 | /* |
918 | * TableFreeHandlesToCache |
919 | * |
920 | * Frees multiple handles of the specified type by repeatedly |
921 | * calling TableFreeSingleHandleToCache. |
922 | * |
923 | */ |
924 | void TableFreeHandlesToCache(HandleTable *pTable, uint32_t uType, const OBJECTHANDLE *pHandleBase, uint32_t uCount); |
925 | |
926 | /*--------------------------------------------------------------------------*/ |
927 | |
928 | |
929 | |
930 | /**************************************************************************** |
931 | * |
932 | * TABLE SCANNING |
933 | * |
934 | ****************************************************************************/ |
935 | |
936 | /* |
937 | * TableScanHandles |
938 | * |
939 | * Implements the core handle scanning loop for a table. |
940 | * |
941 | */ |
942 | void CALLBACK TableScanHandles(PTR_HandleTable pTable, |
943 | const uint32_t *puType, |
944 | uint32_t uTypeCount, |
945 | SEGMENTITERATOR pfnSegmentIterator, |
946 | BLOCKSCANPROC pfnBlockHandler, |
947 | ScanCallbackInfo *pInfo, |
948 | CrstHolderWithState *pCrstHolder); |
949 | |
950 | |
951 | /* |
952 | * xxxTableScanHandlesAsync |
953 | * |
954 | * Implements asynchronous handle scanning for a table. |
955 | * |
956 | */ |
957 | void CALLBACK xxxTableScanHandlesAsync(PTR_HandleTable pTable, |
958 | const uint32_t *puType, |
959 | uint32_t uTypeCount, |
960 | SEGMENTITERATOR pfnSegmentIterator, |
961 | BLOCKSCANPROC pfnBlockHandler, |
962 | ScanCallbackInfo *pInfo, |
963 | CrstHolderWithState *pCrstHolder); |
964 | |
965 | |
966 | /* |
967 | * TypesRequireUserDataScanning |
968 | * |
 * Determines whether the set of types listed should get user data during scans.
 *
 * If ALL of the types passed have user data, then this function will enable user
 * data support; otherwise it will disable user data support.
973 | * |
974 | * IN OTHER WORDS, SCANNING WITH A MIX OF USER-DATA AND NON-USER-DATA TYPES IS NOT SUPPORTED |
975 | * |
976 | */ |
977 | BOOL TypesRequireUserDataScanning(HandleTable *pTable, const uint32_t *types, uint32_t typeCount); |
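
/*
 * Illustrative sketch: the result is typically used to pick between the
 * user-data and non-user-data block handlers declared below.
 *
 *     BLOCKSCANPROC pfnBlock = TypesRequireUserDataScanning(pTable, types, typeCount)
 *                                  ? BlockScanBlocksWithUserData
 *                                  : BlockScanBlocksWithoutUserData;
 */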
978 | |
979 | |
980 | /* |
981 | * BuildAgeMask |
982 | * |
983 | * Builds an age mask to be used when examining/updating the write barrier. |
984 | * |
985 | */ |
986 | uint32_t BuildAgeMask(uint32_t uGen, uint32_t uMaxGen); |
987 | |
988 | |
989 | /* |
990 | * QuickSegmentIterator |
991 | * |
992 | * Returns the next segment to be scanned in a scanning loop. |
993 | * |
994 | */ |
995 | PTR_TableSegment CALLBACK QuickSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder = 0); |
996 | |
997 | |
998 | /* |
999 | * StandardSegmentIterator |
1000 | * |
1001 | * Returns the next segment to be scanned in a scanning loop. |
1002 | * |
1003 | * This iterator performs some maintenance on the segments, |
1004 | * primarily making sure the block chains are sorted so that |
1005 | * g0 scans are more likely to operate on contiguous blocks. |
1006 | * |
1007 | */ |
1008 | PTR_TableSegment CALLBACK StandardSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder = 0); |
1009 | |
1010 | |
1011 | /* |
1012 | * FullSegmentIterator |
1013 | * |
1014 | * Returns the next segment to be scanned in a scanning loop. |
1015 | * |
1016 | * This iterator performs full maintenance on the segments, |
1017 | * including freeing those it notices are empty along the way. |
1018 | * |
1019 | */ |
1020 | PTR_TableSegment CALLBACK FullSegmentIterator(PTR_HandleTable pTable, PTR_TableSegment pPrevSegment, CrstHolderWithState *pCrstHolder = 0); |
1021 | |
1022 | |
1023 | /* |
1024 | * BlockScanBlocksWithoutUserData |
1025 | * |
1026 | * Calls the specified callback for each handle, optionally aging the corresponding generation clumps. |
1027 | * NEVER propagates per-handle user data to the callback. |
1028 | * |
1029 | */ |
1030 | void CALLBACK BlockScanBlocksWithoutUserData(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1031 | |
1032 | |
1033 | /* |
1034 | * BlockScanBlocksWithUserData |
1035 | * |
1036 | * Calls the specified callback for each handle, optionally aging the corresponding generation clumps. |
1037 | * ALWAYS propagates per-handle user data to the callback. |
1038 | * |
1039 | */ |
1040 | void CALLBACK BlockScanBlocksWithUserData(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1041 | |
1042 | |
1043 | /* |
1044 | * BlockScanBlocksEphemeral |
1045 | * |
1046 | * Calls the specified callback for each handle from the specified generation. |
1047 | * Propagates per-handle user data to the callback if present. |
1048 | * |
1049 | */ |
1050 | void CALLBACK BlockScanBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1051 | |
1052 | |
1053 | /* |
1054 | * BlockAgeBlocks |
1055 | * |
1056 | * Ages all clumps in a range of consecutive blocks. |
1057 | * |
1058 | */ |
1059 | void CALLBACK BlockAgeBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1060 | |
1061 | |
1062 | /* |
1063 | * BlockAgeBlocksEphemeral |
1064 | * |
1065 | * Ages all clumps within the specified generation. |
1066 | * |
1067 | */ |
1068 | void CALLBACK BlockAgeBlocksEphemeral(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1069 | |
1070 | |
1071 | /* |
1072 | * BlockResetAgeMapForBlocks |
1073 | * |
1074 | * Clears the age maps for a range of blocks. |
1075 | * |
1076 | */ |
1077 | void CALLBACK BlockResetAgeMapForBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1078 | |
1079 | |
1080 | /* |
1081 | * BlockVerifyAgeMapForBlocks |
1082 | * |
1083 | * Verifies the age maps for a range of blocks, and also validates the objects pointed to. |
1084 | * |
1085 | */ |
1086 | void CALLBACK BlockVerifyAgeMapForBlocks(PTR_TableSegment pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo); |
1087 | |
1088 | |
1089 | /* |
1090 | * xxxAsyncSegmentIterator |
1091 | * |
 * Implements the segment iterator used by xxxTableScanHandlesAsync.
1093 | * |
1094 | */ |
1095 | PTR_TableSegment CALLBACK xxxAsyncSegmentIterator(PTR_HandleTable pTable, TableSegment *pPrevSegment, CrstHolderWithState *pCrstHolder); |
1096 | |
1097 | /*--------------------------------------------------------------------------*/ |
1098 | |