1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4
/*
 * Generational GC handle manager. Core Table Implementation.
 *
 * Implementation of core table management routines.
 *
 */
13
14#include "common.h"
15
16#include "gcenv.h"
17#include "gcenv.inl"
18#include "gc.h"
19#include "handletablepriv.h"
20
21/****************************************************************************
22 *
23 * RANDOM HELPERS
24 *
25 ****************************************************************************/
26
27const uint8_t c_rgLowBitIndex[256] =
28{
29 0xff, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
30 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
31 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
32 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
33 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
34 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
35 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
36 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
37 0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
38 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
39 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
40 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
41 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
42 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
43 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
44 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
45 0x07, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
46 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
47 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
48 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
49 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
50 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
51 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
52 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
53 0x06, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
54 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
55 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
56 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
57 0x05, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
58 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
59 0x04, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
60 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00,
61};
62
63#ifndef DACCESS_COMPILE
64
65/*
66 * A 32/64 neutral quicksort
67 */
68//<TODO>@TODO: move/merge into common util file</TODO>
69typedef int (*PFNCOMPARE)(uintptr_t p, uintptr_t q);
70void QuickSort(uintptr_t *pData, int left, int right, PFNCOMPARE pfnCompare)
71{
72 WRAPPER_NO_CONTRACT;
73
74 do
75 {
76 int i = left;
77 int j = right;
78
79 uintptr_t x = pData[(i + j + 1) / 2];
80
81 do
82 {
83 while (pfnCompare(pData[i], x) < 0)
84 i++;
85
86 while (pfnCompare(x, pData[j]) < 0)
87 j--;
88
89 if (i > j)
90 break;
91
92 if (i < j)
93 {
94 uintptr_t t = pData[i];
95 pData[i] = pData[j];
96 pData[j] = t;
97 }
98
99 i++;
100 j--;
101
102 } while (i <= j);
103
104 if ((j - left) <= (right - i))
105 {
106 if (left < j)
107 QuickSort(pData, left, j, pfnCompare);
108
109 left = i;
110 }
111 else
112 {
113 if (i < right)
114 QuickSort(pData, i, right, pfnCompare);
115
116 right = j;
117 }
118
119 } while (left < right);
120}
121
122
/*
 * CompareHandlesByFreeOrder
 *
 * Returns:
 *  <0 - handle P should be freed before handle Q
 *  =0 - handles are equivalent for free order purposes
 *  >0 - handle Q should be freed before handle P
 *
 */
132int CompareHandlesByFreeOrder(uintptr_t p, uintptr_t q)
133{
134 LIMITED_METHOD_CONTRACT;
135
136 // compute the segments for the handles
137 TableSegment *pSegmentP = (TableSegment *)(p & HANDLE_SEGMENT_ALIGN_MASK);
138 TableSegment *pSegmentQ = (TableSegment *)(q & HANDLE_SEGMENT_ALIGN_MASK);
139
140 // are the handles in the same segment?
141 if (pSegmentP == pSegmentQ)
142 {
143 // return the in-segment handle free order
144 return (int)((intptr_t)q - (intptr_t)p);
145 }
146 else if (pSegmentP)
147 {
148 // do we have two valid segments?
149 if (pSegmentQ)
150 {
151 // return the sequence order of the two segments
152 return (int)(uint32_t)pSegmentQ->bSequence - (int)(uint32_t)pSegmentP->bSequence;
153 }
154 else
155 {
156 // only the P handle is valid - free Q first
157 return 1;
158 }
159 }
160 else if (pSegmentQ)
161 {
162 // only the Q handle is valid - free P first
163 return -1;
164 }
165
166 // neither handle is valid
167 return 0;
168}
169
170
171/*
172 * ZeroHandles
173 *
174 * Zeroes the object pointers for an array of handles.
175 *
176 */
177void ZeroHandles(OBJECTHANDLE *pHandleBase, uint32_t uCount)
178{
179 LIMITED_METHOD_CONTRACT;
180
181 // compute our stopping point
182 OBJECTHANDLE *pLastHandle = pHandleBase + uCount;
183
184 // loop over the array, zeroing as we go
185 while (pHandleBase < pLastHandle)
186 {
187 // get the current handle from the array
188 OBJECTHANDLE handle = *pHandleBase;
189
190 // advance to the next handle
191 pHandleBase++;
192
193 // zero the handle's object pointer
194 *(_UNCHECKED_OBJECTREF *)handle = NULL;
195 }
196}
197
198#ifdef _DEBUG
199void CALLBACK DbgCountEnumeratedBlocks(TableSegment *pSegment, uint32_t uBlock, uint32_t uCount, ScanCallbackInfo *pInfo)
200{
201 LIMITED_METHOD_CONTRACT;
202 UNREFERENCED_PARAMETER(pSegment);
203 UNREFERENCED_PARAMETER(uBlock);
204
205 // accumulate the block count in pInfo->param1
206 pInfo->param1 += uCount;
207}
208#endif
209
210/*--------------------------------------------------------------------------*/
211
212
213
214/****************************************************************************
215 *
216 * CORE TABLE MANAGEMENT
217 *
218 ****************************************************************************/
219
220/*
221 * TableCanFreeSegmentNow
222 *
223 * Determines if it is OK to free the specified segment at this time.
224 *
225 */
226BOOL TableCanFreeSegmentNow(HandleTable *pTable, TableSegment *pSegment)
227{
228 LIMITED_METHOD_CONTRACT;
229
230 // sanity
231 _ASSERTE(pTable);
232 _ASSERTE(pSegment);
233#ifdef _DEBUG
234 // there have been cases in the past where the original assert would
235 // fail but by the time a dump was created the lock was unowned so
236 // there was no way to tell who the previous owner was.
237 EEThreadId threadId = pTable->Lock.GetHolderThreadId();
238 _ASSERTE(threadId.IsCurrentThread());
239#endif // _DEBUG
240
241 // deterine if any segment is currently being scanned asynchronously
242 TableSegment *pSegmentAsync = NULL;
243
244 // do we have async info?
245 AsyncScanInfo *pAsyncInfo = pTable->pAsyncScanInfo;
246 if (pAsyncInfo)
247 {
248 // must always have underlying callback info in an async scan
249 _ASSERTE(pAsyncInfo->pCallbackInfo);
250
251 // yes - if a segment is being scanned asynchronously it is listed here
252 pSegmentAsync = pAsyncInfo->pCallbackInfo->pCurrentSegment;
253 }
254
255 // we can free our segment if it isn't being scanned asynchronously right now
256 return (pSegment != pSegmentAsync);
257}
258
259#endif // !DACCESS_COMPILE
260
261/*
262 * BlockFetchUserDataPointer
263 *
264 * Gets the user data pointer for the first handle in a block.
265 *
266 */
267PTR_uintptr_t BlockFetchUserDataPointer(PTR__TableSegmentHeader pSegment, uint32_t uBlock, BOOL fAssertOnError)
268{
269 LIMITED_METHOD_DAC_CONTRACT;
270
271 // assume NULL until we actually find the data
272 PTR_uintptr_t pUserData = NULL;
273 // get the user data index for this block
274 uint32_t blockIndex = pSegment->rgUserData[uBlock];
275
276 // is there user data for the block?
277 if (blockIndex != BLOCK_INVALID)
278 {
279 // In DAC builds, we may not have the entire segment table mapped and in any case it will be quite
280 // large. Since we only need one element, we'll retrieve just that one element.
281 pUserData = PTR_uintptr_t(PTR_TO_TADDR(pSegment) + offsetof(TableSegment, rgValue) +
282 (blockIndex * HANDLE_BYTES_PER_BLOCK));
283 }
284 else if (fAssertOnError)
285 {
286 // no user data is associated with this block
287 //
288 // we probably got here for one of the following reasons:
289 // 1) an outside caller tried to do a user data operation on an incompatible handle
290 // 2) the user data map in the segment is corrupt
291 // 3) the global type flags are corrupt
292 //
293 _ASSERTE(FALSE);
294 }
295
296 // return the result
297 return pUserData;
298}
299
300
301/*
302 * HandleFetchSegmentPointer
303 *
304 * Computes the segment pointer for a given handle.
305 *
306 */
307__inline PTR__TableSegmentHeader HandleFetchSegmentPointer(OBJECTHANDLE handle)
308{
309 LIMITED_METHOD_DAC_CONTRACT;
310
311 // find the segment for this handle
312 PTR__TableSegmentHeader pSegment = PTR__TableSegmentHeader((uintptr_t)handle & HANDLE_SEGMENT_ALIGN_MASK);
313
314 // sanity
315 _ASSERTE(pSegment);
316
317 // return the segment pointer
318 return pSegment;
319}
320
321
322/*
323 * HandleValidateAndFetchUserDataPointer
324 *
325 * Gets the user data pointer for the specified handle.
326 * ASSERTs and returns NULL if handle is not of the expected type.
327 *
328 */
329uintptr_t *HandleValidateAndFetchUserDataPointer(OBJECTHANDLE handle, uint32_t uTypeExpected)
330{
331 WRAPPER_NO_CONTRACT;
332
333 // get the segment for this handle
334 PTR__TableSegmentHeader pSegment = HandleFetchSegmentPointer(handle);
335
336 // find the offset of this handle into the segment
337 uintptr_t offset = (uintptr_t)handle & HANDLE_SEGMENT_CONTENT_MASK;
338
339 // make sure it is in the handle area and not the header
340 _ASSERTE(offset >= HANDLE_HEADER_SIZE);
341
342 // convert the offset to a handle index
343 uint32_t uHandle = (uint32_t)((offset - HANDLE_HEADER_SIZE) / HANDLE_SIZE);
344
345 // compute the block this handle resides in
346 uint32_t uBlock = uHandle / HANDLE_HANDLES_PER_BLOCK;
347
348 // fetch the user data for this block
349 PTR_uintptr_t pUserData = BlockFetchUserDataPointer(pSegment, uBlock, TRUE);
350
351 // did we get the user data block?
352 if (pUserData)
353 {
354 // yup - adjust the pointer to be handle-specific
355 pUserData += (uHandle - (uBlock * HANDLE_HANDLES_PER_BLOCK));
356
357 // validate the block type before returning the pointer
358 if (pSegment->rgBlockType[uBlock] != uTypeExpected)
359 {
360 // type mismatch - caller error
361 _ASSERTE(FALSE);
362
363 // don't return a pointer to the caller
364 pUserData = NULL;
365 }
366 }
367
368 // return the result
369 return pUserData;
370}
371
372/*
373 * HandleQuickFetchUserDataPointer
374 *
375 * Gets the user data pointer for a handle.
376 * Less validation is performed.
377 *
378 */
379PTR_uintptr_t HandleQuickFetchUserDataPointer(OBJECTHANDLE handle)
380{
381 WRAPPER_NO_CONTRACT;
382
383 /*
384 NOTHROW;
385 GC_NOTRIGGER;
386 MODE_ANY;
387 */
388 SUPPORTS_DAC;
389
390 // get the segment for this handle
391 PTR__TableSegmentHeader pSegment = HandleFetchSegmentPointer(handle);
392
393 // find the offset of this handle into the segment
394 uintptr_t offset = (uintptr_t)handle & HANDLE_SEGMENT_CONTENT_MASK;
395
396 // make sure it is in the handle area and not the header
397 _ASSERTE(offset >= HANDLE_HEADER_SIZE);
398
399 // convert the offset to a handle index
400 uint32_t uHandle = (uint32_t)((offset - HANDLE_HEADER_SIZE) / HANDLE_SIZE);
401
402 // compute the block this handle resides in
403 uint32_t uBlock = uHandle / HANDLE_HANDLES_PER_BLOCK;
404
405 // fetch the user data for this block
406 PTR_uintptr_t pUserData = BlockFetchUserDataPointer(pSegment, uBlock, TRUE);
407
408 // if we got the user data block then adjust the pointer to be handle-specific
409 if (pUserData)
410 pUserData += (uHandle - (uBlock * HANDLE_HANDLES_PER_BLOCK));
411
412 // return the result
413 return pUserData;
414}
415
416#ifndef DACCESS_COMPILE
417/*
418 * HandleQuickSetUserData
419 *
420 * Stores user data with a handle.
421 *
422 */
423void HandleQuickSetUserData(OBJECTHANDLE handle, uintptr_t lUserData)
424{
425 WRAPPER_NO_CONTRACT;
426
427 /*
428 NOTHROW;
429 GC_NOTRIGGER;
430 MODE_ANY;
431 */
432
433 // fetch the user data slot for this handle
434 uintptr_t *pUserData = HandleQuickFetchUserDataPointer(handle);
435
436 // is there a slot?
437 if (pUserData)
438 {
439 // yes - store the info
440 *pUserData = lUserData;
441 }
442}
443
444#endif // !DACCESS_COMPILE
445
446/*
447 * HandleFetchType
448 *
449 * Computes the type index for a given handle.
450 *
451 */
452uint32_t HandleFetchType(OBJECTHANDLE handle)
453{
454 WRAPPER_NO_CONTRACT;
455
456 // get the segment for this handle
457 PTR__TableSegmentHeader pSegment = HandleFetchSegmentPointer(handle);
458
459 // find the offset of this handle into the segment
460 uintptr_t offset = (uintptr_t)handle & HANDLE_SEGMENT_CONTENT_MASK;
461
462 // make sure it is in the handle area and not the header
463 _ASSERTE(offset >= HANDLE_HEADER_SIZE);
464
465 // convert the offset to a handle index
466 uint32_t uHandle = (uint32_t)((offset - HANDLE_HEADER_SIZE) / HANDLE_SIZE);
467
468 // compute the block this handle resides in
469 uint32_t uBlock = uHandle / HANDLE_HANDLES_PER_BLOCK;
470
471 // return the block's type
472 return pSegment->rgBlockType[uBlock];
473}
474
475/*
476 * HandleFetchHandleTable
477 *
478 * Computes the type index for a given handle.
479 *
480 */
481PTR_HandleTable HandleFetchHandleTable(OBJECTHANDLE handle)
482{
483 WRAPPER_NO_CONTRACT;
484 SUPPORTS_DAC;
485
486 // get the segment for this handle
487 PTR__TableSegmentHeader pSegment = HandleFetchSegmentPointer(handle);
488
489 // return the table
490 return pSegment->pHandleTable;
491}
492
493#ifndef DACCESS_COMPILE
494/*
495 * SegmentInitialize
496 *
497 * Initializes a segment.
498 *
499 */
500BOOL SegmentInitialize(TableSegment *pSegment, HandleTable *pTable)
501{
502 LIMITED_METHOD_CONTRACT;
503
504 /*
505 NOTHROW;
506 GC_NOTRIGGER;
507 MODE_ANY;
508 */
509
510 // we want to commit enough for the header PLUS some handles
511 size_t dwCommit = ALIGN_UP(HANDLE_HEADER_SIZE, OS_PAGE_SIZE);
512
513 // commit the header
514 if (!GCToOSInterface::VirtualCommit(pSegment, dwCommit))
515 {
516 //_ASSERTE(FALSE);
517 return FALSE;
518 }
519
520 // remember how many blocks we commited
521 pSegment->bCommitLine = (uint8_t)((dwCommit - HANDLE_HEADER_SIZE) / HANDLE_BYTES_PER_BLOCK);
522
523 // now preinitialize the 0xFF guys
524 memset(pSegment->rgGeneration, 0xFF, sizeof(pSegment->rgGeneration));
525 memset(pSegment->rgTail, BLOCK_INVALID, sizeof(pSegment->rgTail));
526 memset(pSegment->rgHint, BLOCK_INVALID, sizeof(pSegment->rgHint));
527 memset(pSegment->rgFreeMask, 0xFF, sizeof(pSegment->rgFreeMask));
528 memset(pSegment->rgBlockType, TYPE_INVALID, sizeof(pSegment->rgBlockType));
529 memset(pSegment->rgUserData, BLOCK_INVALID, sizeof(pSegment->rgUserData));
530
531 // prelink the free chain
532 _ASSERTE(FitsInU1(HANDLE_BLOCKS_PER_SEGMENT));
533 uint8_t u = 0;
534 while (u < (HANDLE_BLOCKS_PER_SEGMENT - 1))
535 {
536 uint8_t next = u + 1;
537 pSegment->rgAllocation[u] = next;
538 u = next;
539 }
540
541 // and terminate the last node
542 pSegment->rgAllocation[u] = BLOCK_INVALID;
543
544 // store the back pointer from our new segment to its owning table
545 pSegment->pHandleTable = pTable;
546
547 // all done
548 return TRUE;
549}
550
551
552/*
553 * SegmentFree
554 *
555 * Frees the specified segment.
556 *
557 */
558void SegmentFree(TableSegment *pSegment)
559{
560 WRAPPER_NO_CONTRACT;
561
562 /*
563 NOTHROW;
564 GC_NOTRIGGER;
565 MODE_ANY;
566 */
567
568 // free the segment's memory
569 GCToOSInterface::VirtualRelease(pSegment, HANDLE_SEGMENT_SIZE);
570}
571
572
573/*
574 * SegmentAlloc
575 *
576 * Allocates a new segment.
577 *
578 */
579TableSegment *SegmentAlloc(HandleTable *pTable)
580{
581 LIMITED_METHOD_CONTRACT;
582
583 /*
584 NOTHROW;
585 GC_NOTRIGGER;
586 MODE_ANY;
587 */
588
589 // allocate the segment's address space
590 TableSegment *pSegment = NULL;
591
592 // All platforms currently require 64Kb aligned table segments, which is what VirtualAlloc guarantees.
593 // The actual requirement is that the alignment of the reservation equals or exceeds the size of the
594 // reservation. This requirement stems from the method the handle table uses to map quickly from a handle
595 // address back to the handle table segment header.
596 _ASSERTE(HANDLE_SEGMENT_ALIGNMENT >= HANDLE_SEGMENT_SIZE);
597 _ASSERTE(HANDLE_SEGMENT_ALIGNMENT == 0x10000);
598
599 pSegment = (TableSegment *)GCToOSInterface::VirtualReserve(HANDLE_SEGMENT_SIZE, HANDLE_SEGMENT_ALIGNMENT, VirtualReserveFlags::None);
600 _ASSERTE(((size_t)pSegment % HANDLE_SEGMENT_ALIGNMENT) == 0);
601
602 // bail out if we couldn't get any memory
603 if (!pSegment)
604 {
605 return NULL;
606 }
607
608 // initialize the header
609 if (!SegmentInitialize(pSegment, pTable))
610 {
611 SegmentFree(pSegment);
612 pSegment = NULL;
613 }
614
615 // all done
616 return pSegment;
617}
618
619// Mark a handle being free.
620__inline void SegmentMarkFreeMask(TableSegment *pSegment, _UNCHECKED_OBJECTREF* h)
621{
622 CONTRACTL
623 {
624 NOTHROW;
625 GC_NOTRIGGER;
626 MODE_COOPERATIVE;
627 }
628 CONTRACTL_END;
629
630 uint32_t uMask = (uint32_t)(h - pSegment->rgValue);
631 uint32_t uBit = uMask % HANDLE_HANDLES_PER_MASK;
632 uMask = uMask / HANDLE_HANDLES_PER_MASK;
633 pSegment->rgFreeMask[uMask] |= (1<<uBit);
634}
635
636// Mark a handle being used.
637__inline void SegmentUnMarkFreeMask(TableSegment *pSegment, _UNCHECKED_OBJECTREF* h)
638{
639 CONTRACTL
640 {
641 NOTHROW;
642 GC_NOTRIGGER;
643 MODE_COOPERATIVE;
644 }
645 CONTRACTL_END;
646
647 uint32_t uMask = (uint32_t)(h - pSegment->rgValue);
648 uint32_t uBit = uMask % HANDLE_HANDLES_PER_MASK;
649 uMask = uMask / HANDLE_HANDLES_PER_MASK;
650 pSegment->rgFreeMask[uMask] &= ~(1<<uBit);
651}
652
653// Prepare a segment to be moved to default domain.
654// Remove all non-async pin handles.
655void SegmentPreCompactAsyncPinHandles(TableSegment *pSegment, void (*clearIfComplete)(Object*))
656{
657 CONTRACTL
658 {
659 NOTHROW;
660 GC_NOTRIGGER;
661 MODE_COOPERATIVE;
662 }
663 CONTRACTL_END;
664
665 pSegment->fResortChains = true;
666 pSegment->fNeedsScavenging = true;
667
668 // Zero out all non-async pin handles
669 uint32_t uBlock;
670 for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
671 {
672 if (pSegment->rgBlockType[uBlock] == TYPE_INVALID)
673 {
674 continue;
675 }
676 else if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
677 {
678 _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
679 _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
680 do
681 {
682 *pValue = NULL;
683 pValue ++;
684 } while (pValue < pLast);
685
686 ((uint32_t*)pSegment->rgGeneration)[uBlock] = (uint32_t)-1;
687
688 uint32_t *pdwMask = pSegment->rgFreeMask + (uBlock * HANDLE_MASKS_PER_BLOCK);
689 uint32_t *pdwMaskLast = pdwMask + HANDLE_MASKS_PER_BLOCK;
690 do
691 {
692 *pdwMask = MASK_EMPTY;
693 pdwMask ++;
694 } while (pdwMask < pdwMaskLast);
695
696 pSegment->rgBlockType[uBlock] = TYPE_INVALID;
697 pSegment->rgUserData[uBlock] = BLOCK_INVALID;
698 pSegment->rgLocks[uBlock] = 0;
699 }
700 }
701
702 // Return all non-async pin handles to free list
703 uint32_t uType;
704 for (uType = 0; uType < HANDLE_MAX_INTERNAL_TYPES; uType ++)
705 {
706 if (uType == HNDTYPE_ASYNCPINNED)
707 {
708 continue;
709 }
710 pSegment->rgFreeCount[uType] = 0;
711 if (pSegment->rgHint[uType] != BLOCK_INVALID)
712 {
713 uint32_t uLast = pSegment->rgHint[uType];
714 uint8_t uFirst = pSegment->rgAllocation[uLast];
715 pSegment->rgAllocation[uLast] = pSegment->bFreeList;
716 pSegment->bFreeList = uFirst;
717 pSegment->rgHint[uType] = BLOCK_INVALID;
718 pSegment->rgTail[uType] = BLOCK_INVALID;
719 }
720 }
721
722 // make sure the remaining async handle has MethodTable that exists in default domain
723 uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
724 if (uBlock == BLOCK_INVALID)
725 {
726 return;
727 }
728 uint32_t freeCount = 0;
729 for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
730 {
731 if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
732 {
733 continue;
734 }
735 if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
736 {
737 continue;
738 }
739 _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
740 _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
741
742 do
743 {
744 _UNCHECKED_OBJECTREF value = *pValue;
745 if (!HndIsNullOrDestroyedHandle(value))
746 {
747 clearIfComplete((Object*)value);
748 }
749 else
750 {
751 // reset free mask
752 SegmentMarkFreeMask(pSegment, pValue);
753 freeCount ++;
754 }
755 pValue ++;
756 } while (pValue != pLast);
757 }
758
759 pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] = freeCount;
760}
761
762// Copy a handle to a different segment in the same HandleTable
763BOOL SegmentCopyAsyncPinHandle(TableSegment *pSegment, _UNCHECKED_OBJECTREF *h)
764{
765 CONTRACTL
766 {
767 NOTHROW;
768 GC_NOTRIGGER;
769 MODE_COOPERATIVE;
770 }
771 CONTRACTL_END;
772
773 _ASSERTE (HandleFetchSegmentPointer((OBJECTHANDLE)h) != pSegment);
774
775 if (pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] == 0)
776 {
777 uint8_t uBlock = pSegment->bFreeList;
778 if (uBlock == BLOCK_INVALID)
779 {
780 // All slots are used up.
781 return FALSE;
782 }
783 pSegment->bFreeList = pSegment->rgAllocation[uBlock];
784 pSegment->rgBlockType[uBlock] = HNDTYPE_ASYNCPINNED;
785 pSegment->rgAllocation[uBlock] = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
786 pSegment->rgHint[HNDTYPE_ASYNCPINNED] = uBlock;
787 pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] += HANDLE_HANDLES_PER_BLOCK;
788 }
789 uint8_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
790 uint8_t uLast = uBlock;
791 do
792 {
793 uint32_t n = uBlock * (HANDLE_HANDLES_PER_BLOCK/HANDLE_HANDLES_PER_MASK);
794 uint32_t* pMask = pSegment->rgFreeMask + n;
795 if (pMask[0] != 0 || pMask[1] != 0)
796 {
797 break;
798 }
799 uBlock = pSegment->rgAllocation[uBlock];
800 } while (uBlock != uLast);
801 _ASSERTE (uBlock != uLast);
802 pSegment->rgHint[HNDTYPE_ASYNCPINNED] = uBlock;
803 _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
804 _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
805 do
806 {
807 if (*pValue == NULL)
808 {
809 SegmentUnMarkFreeMask(pSegment,pValue);
810 *pValue = *h;
811 *h = NULL;
812 break;
813 }
814 pValue ++;
815 } while (pValue != pLast);
816 _ASSERTE (pValue != pLast);
817 pSegment->rgFreeCount[HNDTYPE_ASYNCPINNED] --;
818 return TRUE;
819}
820
821void SegmentCompactAsyncPinHandles(TableSegment *pSegment, TableSegment **ppWorkerSegment, void (*clearIfComplete)(Object*))
822{
823 CONTRACTL
824 {
825 NOTHROW;
826 GC_NOTRIGGER;
827 MODE_COOPERATIVE;
828 }
829 CONTRACTL_END;
830
831 uint32_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
832 if (uBlock == BLOCK_INVALID)
833 {
834 return;
835 }
836 for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
837 {
838 if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
839 {
840 continue;
841 }
842 if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
843 {
844 continue;
845 }
846 _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
847 _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
848
849 do
850 {
851 BOOL fNeedNewSegment = FALSE;
852 _UNCHECKED_OBJECTREF value = *pValue;
853 if (!HndIsNullOrDestroyedHandle(value))
854 {
855 clearIfComplete((Object*)value);
856 fNeedNewSegment = !SegmentCopyAsyncPinHandle(*ppWorkerSegment,pValue);
857 }
858 if (fNeedNewSegment)
859 {
860 _ASSERTE ((*ppWorkerSegment)->rgFreeCount[HNDTYPE_ASYNCPINNED] == 0 &&
861 (*ppWorkerSegment)->bFreeList == BLOCK_INVALID);
862 TableSegment *pNextSegment = (*ppWorkerSegment)->pNextSegment;
863 SegmentPreCompactAsyncPinHandles(pNextSegment, clearIfComplete);
864 *ppWorkerSegment = pNextSegment;
865 if (pNextSegment == pSegment)
866 {
867 // The current segment will be moved to default domain.
868 return;
869 }
870 }
871 else
872 {
873 pValue ++;
874 }
875 } while (pValue != pLast);
876 }
877}
878
879
880// Mark AsyncPinHandles ready to be cleaned when the marker job is processed
881BOOL SegmentHandleAsyncPinHandles (TableSegment *pSegment, const AsyncPinCallbackContext &callbackCtx)
882{
883 CONTRACTL
884 {
885 GC_NOTRIGGER;
886 NOTHROW;
887 MODE_COOPERATIVE;
888 }
889 CONTRACTL_END;
890
891 uint32_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
892 if (uBlock == BLOCK_INVALID)
893 {
894 // There is no pinning handles.
895 return FALSE;
896 }
897
898 BOOL result = FALSE;
899
900 for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
901 {
902 if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
903 {
904 continue;
905 }
906 if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
907 {
908 continue;
909 }
910 _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
911 _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
912
913 do
914 {
915 _UNCHECKED_OBJECTREF value = *pValue;
916 if (!HndIsNullOrDestroyedHandle(value))
917 {
918 // calls back into the VM using the callback given to
919 // Ref_HandleAsyncPinHandles
920 if (callbackCtx.Invoke((Object*)value))
921 {
922 result = TRUE;
923 }
924 }
925 pValue ++;
926 } while (pValue != pLast);
927 }
928
929 return result;
930}
931
932// Replace an async pin handle with one from default domain
933bool SegmentRelocateAsyncPinHandles (TableSegment *pSegment,
934 HandleTable *pTargetTable,
935 void (*clearIfComplete)(Object*),
936 void (*setHandle)(Object*, OBJECTHANDLE))
937{
938 CONTRACTL
939 {
940 GC_NOTRIGGER;
941 NOTHROW;
942 MODE_COOPERATIVE;
943 }
944 CONTRACTL_END;
945
946 uint32_t uBlock = pSegment->rgHint[HNDTYPE_ASYNCPINNED];
947 if (uBlock == BLOCK_INVALID)
948 {
949 // There is no pinning handles.
950 return true;
951 }
952 for (uBlock = 0; uBlock < pSegment->bEmptyLine; uBlock ++)
953 {
954 if (pSegment->rgBlockType[uBlock] != HNDTYPE_ASYNCPINNED)
955 {
956 continue;
957 }
958 if (pSegment->rgFreeMask[uBlock*2] == (uint32_t)-1 && pSegment->rgFreeMask[uBlock*2+1] == (uint32_t)-1)
959 {
960 continue;
961 }
962 _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
963 _UNCHECKED_OBJECTREF *pLast = pValue + HANDLE_HANDLES_PER_BLOCK;
964
965 do
966 {
967 _UNCHECKED_OBJECTREF value = *pValue;
968 if (!HndIsNullOrDestroyedHandle(value))
969 {
970 clearIfComplete((Object*)value);
971 OBJECTHANDLE selfHandle = HndCreateHandle((HHANDLETABLE)pTargetTable, HNDTYPE_ASYNCPINNED, ObjectToOBJECTREF(value));
972 if (!selfHandle)
973 {
974 // failed to allocate a new handle - callers have to handle this.
975 return false;
976 }
977
978 setHandle((Object*)value, selfHandle);
979 *pValue = NULL;
980 }
981 pValue ++;
982 } while (pValue != pLast);
983 }
984
985 return true;
986}
987
988// Mark all non-pending AsyncPinHandle ready for cleanup.
989// We will queue a marker Overlapped to io completion port. We use the marker
990// to make sure that all iocompletion jobs before this marker have been processed.
991// After that we can free the async pinned handles.
992BOOL TableHandleAsyncPinHandles(HandleTable *pTable, const AsyncPinCallbackContext &callbackCtx)
993{
994 CONTRACTL
995 {
996 GC_NOTRIGGER;
997 NOTHROW;
998 MODE_COOPERATIVE;
999 }
1000 CONTRACTL_END;
1001
1002 BOOL result = FALSE;
1003 TableSegment *pSegment = pTable->pSegmentList;
1004
1005 CrstHolder ch(&pTable->Lock);
1006
1007 while (pSegment)
1008 {
1009 if (SegmentHandleAsyncPinHandles (pSegment, callbackCtx))
1010 {
1011 result = TRUE;
1012 }
1013 pSegment = pSegment->pNextSegment;
1014 }
1015
1016 return result;
1017}
1018
1019// Keep needed async Pin Handle by moving them to default domain.
1020// Strategy:
1021// 1. Try to create pin handles in default domain to replace it.
1022// 2. If 1 failed due to OOM, we will relocate segments from this HandleTable to default domain.
1023// a. Clean the segment so that only saved pin handles exist. This segment becomes the worker segment.
1024// b. Copy pin handles from remaining segments to the worker segment. If worker segment is full, start
1025// from a again.
1026// c. After copying all handles to worker segments, move the segments to default domain.
1027// It is very important that in step 2, we should not fail for OOM, which means no memory allocation.
1028void TableRelocateAsyncPinHandles(HandleTable *pTable,
1029 HandleTable *pTargetTable,
1030 void (*clearIfComplete)(Object*),
1031 void (*setHandle)(Object*, OBJECTHANDLE))
1032{
1033 CONTRACTL
1034 {
1035 GC_TRIGGERS;
1036 NOTHROW;
1037 MODE_COOPERATIVE;
1038 }
1039 CONTRACTL_END;
1040
1041 _ASSERTE (pTargetTable->uADIndex == ADIndex(GCToEEInterface::GetDefaultDomainIndex())); // must be for default domain
1042
1043 BOOL fGotException = FALSE;
1044 TableSegment *pSegment = pTable->pSegmentList;
1045 bool wasSuccessful = true;
1046
1047#ifdef _DEBUG
1048 // on debug builds, execute the OOM path 10% of the time.
1049 if (GetRandomInt(100) < 10)
1050 goto SLOW_PATH;
1051#endif
1052
1053 // Step 1: replace pinning handles with ones from default domain
1054 while (pSegment)
1055 {
1056 wasSuccessful = wasSuccessful && SegmentRelocateAsyncPinHandles (pSegment, pTargetTable, clearIfComplete, setHandle);
1057 if (!wasSuccessful)
1058 {
1059 break;
1060 }
1061
1062 pSegment = pSegment->pNextSegment;
1063 }
1064
1065 if (wasSuccessful)
1066 {
1067 return;
1068 }
1069
1070#ifdef _DEBUG
1071SLOW_PATH:
1072#endif
1073
1074 // step 2: default domain runs out of space
1075 // compact all remaining pinning handles and move the segments to default domain
1076
1077 while (true)
1078 {
1079 CrstHolderWithState ch(&pTable->Lock);
1080
1081 // We cannot move segments to a different table if we're asynchronously scanning the current table as
1082 // part of a concurrent GC. That's because the async table scanning code does most of its work without
1083 // the table lock held. So we'll take the table lock and then look to see if we're in a concurrent GC.
1084 // If we are we'll back out and try again. This doesn't prevent a concurrent GC from initiating while
1085 // we have the lock held but the part we care about (the async table scan) takes the table lock during
1086 // a preparation step so we'll be able to complete our segment moves before the async scan has a
1087 // chance to interfere with us (or vice versa).
1088 if (g_theGCHeap->IsConcurrentGCInProgress())
1089 {
1090 // A concurrent GC is in progress so someone might be scanning our segments asynchronously.
1091 // Release the lock, wait for the GC to complete and try again. The order is important; if we wait
1092 // before releasing the table lock we can deadlock with an async table scan.
1093 ch.Release();
1094 g_theGCHeap->WaitUntilConcurrentGCComplete();
1095 continue;
1096 }
1097
1098 // If we get here then we managed to acquire the table lock and observe that no concurrent GC was in
1099 // progress. A concurrent GC could start at any time so that state may have changed, but since we took
1100 // the table lock first we know that the GC could only have gotten as far as attempting to initiate an
1101 // async handle table scan (which attempts to acquire the table lock). So as long as we complete our
1102 // segment compaction and moves without releasing the table lock we're guaranteed to complete before
1103 // the async scan can get in and observe any of the segments.
1104
1105 // Compact async pinning handles into the smallest number of leading segments we can (the worker
1106 // segments).
1107 TableSegment *pWorkerSegment = pTable->pSegmentList;
1108 SegmentPreCompactAsyncPinHandles (pWorkerSegment, clearIfComplete);
1109
1110 pSegment = pWorkerSegment->pNextSegment;
1111 while (pSegment)
1112 {
1113 SegmentCompactAsyncPinHandles (pSegment, &pWorkerSegment, clearIfComplete);
1114 pSegment= pSegment->pNextSegment;
1115 }
1116
1117 // Empty the remaining segments.
1118 pSegment = pWorkerSegment->pNextSegment;
1119 while (pSegment)
1120 {
1121 memset(pSegment->rgValue, 0, (uint32_t)pSegment->bCommitLine * HANDLE_BYTES_PER_BLOCK);
1122 pSegment = pSegment->pNextSegment;
1123 }
1124
1125 // Move the worker segments over to the tail end of the default domain's segment list.
1126 {
1127 CrstHolder ch1(&pTargetTable->Lock);
1128
1129 // Locate the segment currently at the tail of the default domain's segment list.
1130 TableSegment *pTargetSegment = pTargetTable->pSegmentList;
1131 while (pTargetSegment->pNextSegment)
1132 {
1133 pTargetSegment = pTargetSegment->pNextSegment;
1134 }
1135
1136 // Take the worker segments and point them to their new handle table and recalculate their
1137 // sequence numbers to be consistent with the queue they're moving to.
1138 uint8_t bLastSequence = pTargetSegment->bSequence;
1139 pSegment = pTable->pSegmentList;
1140 while (pSegment != pWorkerSegment->pNextSegment)
1141 {
1142 pSegment->pHandleTable = pTargetTable;
1143 pSegment->bSequence = (uint8_t)(((uint32_t)bLastSequence + 1) % 0x100);
1144 bLastSequence = pSegment->bSequence;
1145 pSegment = pSegment->pNextSegment;
1146 }
1147
1148 // Join the worker segments to the tail of the default domain segment list.
1149 pTargetSegment->pNextSegment = pTable->pSegmentList;
1150
1151 // Reset the current handle table segment list to omit the removed worker segments and start at
1152 // the first non-worker.
1153 pTable->pSegmentList = pWorkerSegment->pNextSegment;
1154
1155 // The last worker segment is now the end of the default domain's segment list.
1156 pWorkerSegment->pNextSegment = NULL;
1157 }
1158
1159 break;
1160 }
1161}
1162
1163/*
1164 * Check if a handle is part of a HandleTable
1165 */
1166BOOL TableContainHandle(HandleTable *pTable, OBJECTHANDLE handle)
1167{
1168 _ASSERTE (handle);
1169
1170 // get the segment for this handle
1171 TableSegment *pSegment = (TableSegment *)HandleFetchSegmentPointer(handle);
1172
1173 CrstHolder ch(&pTable->Lock);
1174 TableSegment *pWorkerSegment = pTable->pSegmentList;
1175 while (pWorkerSegment)
1176 {
1177 if (pWorkerSegment == pSegment)
1178 {
1179 return TRUE;
1180 }
1181 pWorkerSegment = pWorkerSegment->pNextSegment;
1182 }
1183 return FALSE;
1184}
1185
1186/*
1187 * SegmentRemoveFreeBlocks
1188 *
1189 * Scans a segment for free blocks of the specified type
1190 * and moves them to the segment's free list.
1191 *
1192 */
void SegmentRemoveFreeBlocks(TableSegment *pSegment, uint32_t uType, BOOL *pfScavengeLater)
{
    WRAPPER_NO_CONTRACT;

    /*
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    */

    // Walks the circular allocation chain for uType, unlinking every block whose
    // free masks show no live handles, and pushes the unlinked blocks onto the
    // segment's free list (preserving their relative order).  pfScavengeLater
    // (optional, may be NULL) is set to TRUE if an empty-but-locked block was
    // found so the caller knows to scavenge again later.

    // fetch the tail block for the specified chain
    uint32_t uPrev = pSegment->rgTail[uType];

    // if it's a terminator then there are no blocks in the chain
    if (uPrev == BLOCK_INVALID)
        return;

    // we may need to clean up user data blocks later
    BOOL fCleanupUserData = FALSE;

    // start iterating with the head block
    // (the chain is circular - the tail's rgAllocation entry points at the head)
    uint32_t uStart = pSegment->rgAllocation[uPrev];
    uint32_t uBlock = uStart;

    // keep track of how many blocks we removed
    uint32_t uRemoved = 0;

    // we want to preserve the relative order of any blocks we free
    // this is the best we can do until the free list is resorted
    uint32_t uFirstFreed = BLOCK_INVALID;
    uint32_t uLastFreed = BLOCK_INVALID;

    // loop until we've processed the whole chain
    for (;;)
    {
        // fetch the next block index
        uint32_t uNext = pSegment->rgAllocation[uBlock];

#ifdef HANDLE_OPTIMIZE_FOR_64_HANDLE_BLOCKS
        // determine whether this block is empty (all 64 free-mask bits set)
        if (((uint64_t*)pSegment->rgFreeMask)[uBlock] == UI64(0xFFFFFFFFFFFFFFFF))
#else
        // assume this block is empty until we know otherwise
        BOOL fEmpty = TRUE;

        // get the first mask for this block
        uint32_t *pdwMask = pSegment->rgFreeMask + (uBlock * HANDLE_MASKS_PER_BLOCK);
        uint32_t *pdwMaskLast = pdwMask + HANDLE_MASKS_PER_BLOCK;

        // loop through the masks until we've processed them all or we've found handles
        do
        {
            // is this mask empty? (a set bit marks a FREE handle slot)
            if (*pdwMask != MASK_EMPTY)
            {
                // nope - this block still has handles in it
                fEmpty = FALSE;
                break;
            }

            // on to the next mask
            pdwMask++;

        } while (pdwMask < pdwMaskLast);

        // is this block empty?
        if (fEmpty)
#endif
        {
            // is this block currently locked?
            if (BlockIsLocked(pSegment, uBlock))
            {
                // block cannot be freed, if we were passed a scavenge flag then set it
                if (pfScavengeLater)
                    *pfScavengeLater = TRUE;
            }
            else
            {
                // safe to free - did it have user data associated?
                uint32_t uData = pSegment->rgUserData[uBlock];
                if (uData != BLOCK_INVALID)
                {
                    // data blocks are 'empty' so we keep them locked
                    // unlock the block so it can be reclaimed below
                    BlockUnlock(pSegment, uData);

                    // unlink the data block from the handle block
                    pSegment->rgUserData[uBlock] = BLOCK_INVALID;

                    // remember that we need to scavenge the data block chain
                    fCleanupUserData = TRUE;
                }

                // mark the block as free
                pSegment->rgBlockType[uBlock] = TYPE_INVALID;

                // have we freed any other blocks yet?
                if (uFirstFreed == BLOCK_INVALID)
                {
                    // no - this is the first one - remember it as the new head
                    uFirstFreed = uBlock;
                }
                else
                {
                    // yes - link this block to the other ones in order
                    pSegment->rgAllocation[uLastFreed] = (uint8_t)uBlock;
                }

                // remember this block for later
                uLastFreed = uBlock;

                // are there other blocks in the chain?
                if (uPrev != uBlock)
                {
                    // yes - unlink this block from the chain
                    pSegment->rgAllocation[uPrev] = (uint8_t)uNext;

                    // if we are removing the tail then pick a new tail
                    if (pSegment->rgTail[uType] == uBlock)
                        pSegment->rgTail[uType] = (uint8_t)uPrev;

                    // if we are removing the hint then pick a new hint
                    if (pSegment->rgHint[uType] == uBlock)
                        pSegment->rgHint[uType] = (uint8_t)uNext;

                    // we removed the current block - reset uBlock to a valid block
                    uBlock = uPrev;

                    // N.B. we'll check if we freed uStart later when it's safe to recover
                }
                else
                {
                    // we're removing last block - sanity check the loop condition
                    _ASSERTE(uNext == uStart);

                    // mark this chain as completely empty
                    pSegment->rgAllocation[uBlock] = BLOCK_INVALID;
                    pSegment->rgTail[uType] = BLOCK_INVALID;
                    pSegment->rgHint[uType] = BLOCK_INVALID;
                }

                // update the number of blocks we've removed
                uRemoved++;
            }
        }

        // if we are back at the beginning then it is time to stop
        if (uNext == uStart)
            break;

        // now see if we need to reset our start block
        // (if the original head was freed, the chain now effectively starts at uNext)
        if (uStart == uLastFreed)
            uStart = uNext;

        // on to the next block
        uPrev = uBlock;
        uBlock = uNext;
    }

    // did we remove any blocks?
    if (uRemoved)
    {
        // yes - link the new blocks into the free list
        // (the freed sub-chain is prepended, so bFreeList becomes its tail's successor)
        pSegment->rgAllocation[uLastFreed] = pSegment->bFreeList;
        pSegment->bFreeList = (uint8_t)uFirstFreed;

        // update the free count for this chain
        pSegment->rgFreeCount[uType] -= (uRemoved * HANDLE_HANDLES_PER_BLOCK);

        // mark for a resort - the free list (and soon allocation chains) may be out of order
        pSegment->fResortChains = TRUE;

        // if we removed blocks that had user data then we need to reclaim those too
        // (recursion terminates: the data chain pass is called with a NULL scavenge flag)
        if (fCleanupUserData)
            SegmentRemoveFreeBlocks(pSegment, HNDTYPE_INTERNAL_DATABLOCK, NULL);
    }
}
1370
1371
1372/*
1373 * SegmentInsertBlockFromFreeListWorker
1374 *
1375 * Inserts a block into a block list within a segment. Blocks are obtained from the
1376 * segment's free list. Returns the index of the block inserted, or BLOCK_INVALID
 * if no blocks were available.
1378 *
1379 * This routine is the core implementation for SegmentInsertBlockFromFreeList.
1380 *
1381 */
uint32_t SegmentInsertBlockFromFreeListWorker(TableSegment *pSegment, uint32_t uType, BOOL fUpdateHint)
{
    WRAPPER_NO_CONTRACT;

    /*
        NOTHROW
        GC_NOTRIGGER;
        MODE_ANY;
    */

    // Pops one block off the segment's free list, commits backing memory for it
    // if necessary, and appends it to the circular chain for uType.  Returns the
    // block index, or BLOCK_INVALID if the free list is empty or the commit fails.

    // fetch the next block from the free list
    uint8_t uBlock = pSegment->bFreeList;

    // if we got the terminator then there are no more blocks
    if (uBlock != BLOCK_INVALID)
    {
        // are we eating out of the last empty range of blocks?
        if (uBlock >= pSegment->bEmptyLine)
        {
            // get the current commit line
            uint32_t uCommitLine = pSegment->bCommitLine;

            // if this block is uncommitted then commit some memory now
            if (uBlock >= uCommitLine)
            {
                // figure out where to commit next
                void * pvCommit = pSegment->rgValue + (uCommitLine * HANDLE_HANDLES_PER_BLOCK);

                // we should commit one more page of handles
                uint32_t dwCommit = OS_PAGE_SIZE;

                // commit the memory; on failure report that no block is available
                if (!GCToOSInterface::VirtualCommit(pvCommit, dwCommit))
                    return BLOCK_INVALID;

                // use the previous commit line as the new decommit line
                pSegment->bDecommitLine = (uint8_t)uCommitLine;

                // adjust the commit line by the number of blocks we committed
                pSegment->bCommitLine = (uint8_t)(uCommitLine + (dwCommit / HANDLE_BYTES_PER_BLOCK));
            }

            // update our empty line
            pSegment->bEmptyLine = uBlock + 1;
        }

        // unlink our block from the free list
        // (for free blocks, rgAllocation holds the next-free link)
        pSegment->bFreeList = pSegment->rgAllocation[uBlock];

        // link our block into the specified chain
        uint32_t uOldTail = pSegment->rgTail[uType];
        if (uOldTail == BLOCK_INVALID)
        {
            // first block, set as head and link to itself (chains are circular)
            pSegment->rgAllocation[uBlock] = (uint8_t)uBlock;

            // there are no other blocks - update the hint anyway
            fUpdateHint = TRUE;
        }
        else
        {
            // not first block - link circularly (new block takes the old tail's successor)
            pSegment->rgAllocation[uBlock] = pSegment->rgAllocation[uOldTail];
            pSegment->rgAllocation[uOldTail] = (uint8_t)uBlock;

            // chain may need resorting depending on what we added
            pSegment->fResortChains = TRUE;
        }

        // mark this block with the type we're using it for
        pSegment->rgBlockType[uBlock] = (uint8_t)uType;

        // update the chain tail
        pSegment->rgTail[uType] = (uint8_t)uBlock;

        // if we are supposed to update the hint, then point it at the new block
        if (fUpdateHint)
            pSegment->rgHint[uType] = (uint8_t)uBlock;

        // increment the chain's free count to reflect the additional block
        pSegment->rgFreeCount[uType] += HANDLE_HANDLES_PER_BLOCK;
    }

    // all done - block index, or BLOCK_INVALID on failure
    return uBlock;
}
1469
1470
1471/*
1472 * SegmentInsertBlockFromFreeList
1473 *
1474 * Inserts a block into a block list within a segment. Blocks are obtained from the
1475 * segment's free list. Returns the index of the block inserted, or BLOCK_INVALID
 * if no blocks were available.
1477 *
1478 * This routine does the work of securing a parallel user data block if required.
1479 *
1480 */
uint32_t SegmentInsertBlockFromFreeList(TableSegment *pSegment, uint32_t uType, BOOL fUpdateHint)
{
    LIMITED_METHOD_CONTRACT;

    /*
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    */

    // Allocates a block of type uType from the segment's free list, additionally
    // allocating and linking a parallel user-data block when the handle type
    // requires one.  Either both allocations succeed or the whole operation
    // fails with BLOCK_INVALID (partial allocations are rolled back).

    uint32_t uBlock, uData = 0;

    // does this block type require user data?
    BOOL fUserData = TypeHasUserData(pSegment->pHandleTable, uType);

    // if we need user data then we need to make sure it can go in the same segment as the handles
    if (fUserData)
    {
        // if we can't also fit the user data in this segment then bail
        // (we need at least two blocks on the free list)
        uBlock = pSegment->bFreeList;
        if ((uBlock == BLOCK_INVALID) || (pSegment->rgAllocation[uBlock] == BLOCK_INVALID))
            return BLOCK_INVALID;

        // allocate our user data block (we do it in this order so that free order is nicer)
        uData = SegmentInsertBlockFromFreeListWorker(pSegment, HNDTYPE_INTERNAL_DATABLOCK, FALSE);
    }

    // now allocate the requested block
    uBlock = SegmentInsertBlockFromFreeListWorker(pSegment, uType, fUpdateHint);

    // should we have a block for user data too?
    if (fUserData)
    {
        // did we get them both?
        if ((uBlock != BLOCK_INVALID) && (uData != BLOCK_INVALID))
        {
            // link the data block to the requested block
            pSegment->rgUserData[uBlock] = (uint8_t)uData;

            // no handles are ever allocated out of a data block
            // lock the block so it won't be reclaimed accidentally
            BlockLock(pSegment, uData);
        }
        else
        {
            // NOTE: We pre-screened that the blocks exist above, so we should only
            //  get here under heavy load when a MEM_COMMIT operation fails.

            // if the type block allocation succeeded then scavenge the type block list
            // (returns the just-allocated, still-empty block to the free list)
            if (uBlock != BLOCK_INVALID)
                SegmentRemoveFreeBlocks(pSegment, uType, NULL);

            // if the user data allocation succeeded then scavenge the user data list
            if (uData != BLOCK_INVALID)
                SegmentRemoveFreeBlocks(pSegment, HNDTYPE_INTERNAL_DATABLOCK, NULL);

            // make sure we return failure
            uBlock = BLOCK_INVALID;
        }
    }

    // all done - block index, or BLOCK_INVALID on failure
    return uBlock;
}
1545
1546
1547/*
1548 * SegmentResortChains
1549 *
1550 * Sorts the block chains for optimal scanning order.
1551 * Sorts the free list to combat fragmentation.
1552 *
1553 */
void SegmentResortChains(TableSegment *pSegment)
{
    WRAPPER_NO_CONTRACT;

    // Rebuilds every block chain in the segment (all type chains plus the free
    // list) into ascending block order via a single backward scan of
    // rgBlockType, optionally scavenging empty blocks first if the segment was
    // flagged for scavenging.  Also recomputes bEmptyLine from the trailing run
    // of free blocks.

    // clear the sort flag for this segment
    pSegment->fResortChains = FALSE;
    BOOL fScavengingOccurred = FALSE;

    // first, do we need to scavenge any blocks?
    if (pSegment->fNeedsScavenging)
    {
        // clear the scavenge flag
        pSegment->fNeedsScavenging = FALSE;

        fScavengingOccurred = TRUE;

        // we may need to explicitly scan the user data chain too
        BOOL fCleanupUserData = FALSE;

        // fetch the empty line for this segment
        uint32_t uLast = pSegment->bEmptyLine;

        // loop over all active blocks, scavenging the empty ones as we go
        for (uint32_t uBlock = 0; uBlock < uLast; uBlock++)
        {
            // fetch the block type of this block
            uint32_t uType = pSegment->rgBlockType[uBlock];

            // only process public block types - we handle data blocks separately
            if (uType < HANDLE_MAX_PUBLIC_TYPES)
            {
#ifdef HANDLE_OPTIMIZE_FOR_64_HANDLE_BLOCKS
                // determine whether this block is empty (all 64 free-mask bits set)
                if (((uint64_t*)pSegment->rgFreeMask)[uBlock] == UI64(0xFFFFFFFFFFFFFFFF))
#else
                // assume this block is empty until we know otherwise
                BOOL fEmpty = TRUE;

                // get the first mask for this block
                uint32_t *pdwMask = pSegment->rgFreeMask + (uBlock * HANDLE_MASKS_PER_BLOCK);
                uint32_t *pdwMaskLast = pdwMask + HANDLE_MASKS_PER_BLOCK;

                // loop through the masks until we've processed them all or we've found handles
                do
                {
                    // is this mask empty? (a set bit marks a FREE handle slot)
                    if (*pdwMask != MASK_EMPTY)
                    {
                        // nope - this block still has handles in it
                        fEmpty = FALSE;
                        break;
                    }

                    // on to the next mask
                    pdwMask++;

                } while (pdwMask < pdwMaskLast);

                // is this block empty?
                if (fEmpty)
#endif
                {
                    // is the block unlocked?
                    if (!BlockIsLocked(pSegment, uBlock))
                    {
                        // safe to free - did it have user data associated?
                        uint32_t uData = pSegment->rgUserData[uBlock];
                        if (uData != BLOCK_INVALID)
                        {
                            // data blocks are 'empty' so we keep them locked
                            // unlock the block so it can be reclaimed below
                            BlockUnlock(pSegment, uData);

                            // unlink the data block from the handle block
                            pSegment->rgUserData[uBlock] = BLOCK_INVALID;

                            // remember that we need to scavenge the data block chain
                            fCleanupUserData = TRUE;
                        }

                        // mark the block as free
                        pSegment->rgBlockType[uBlock] = TYPE_INVALID;

                        // fix up the free count for the block's type
                        pSegment->rgFreeCount[uType] -= HANDLE_HANDLES_PER_BLOCK;

                        // N.B. we don't update the list linkages here since they are rebuilt below
                    }
                }
            }
        }

        // if we have to clean up user data then do that now
        if (fCleanupUserData)
            SegmentRemoveFreeBlocks(pSegment, HNDTYPE_INTERNAL_DATABLOCK, NULL);
    }

    // keep some per-chain data
    // rgChainCurr - head (lowest-indexed block seen so far) of each type chain
    // rgChainHigh - tail (highest-indexed block) of each type chain
    uint8_t rgChainCurr[HANDLE_MAX_INTERNAL_TYPES];
    uint8_t rgChainHigh[HANDLE_MAX_INTERNAL_TYPES];
    uint8_t bChainFree = BLOCK_INVALID;
    uint32_t uEmptyLine = BLOCK_INVALID;
    BOOL fContiguousWithFreeList = TRUE;

    // preinit the chain data to no blocks
    uint32_t uType;
    for (uType = 0; uType < HANDLE_MAX_INTERNAL_TYPES; uType++)
        rgChainHigh[uType] = rgChainCurr[uType] = BLOCK_INVALID;

    // scan back through the block types
    // (scanning backward means each chain naturally ends up in ascending order)
    uint8_t uBlock = HANDLE_BLOCKS_PER_SEGMENT;
    while (uBlock > 0)
    {
        // decrement the block index
        uBlock--;

        // fetch the type for this block
        uType = pSegment->rgBlockType[uBlock];

        // is this block allocated?
        if (uType != TYPE_INVALID)
        {
            // looks allocated
            fContiguousWithFreeList = FALSE;

            // hope the segment's not corrupt :)
            _ASSERTE(uType < HANDLE_MAX_INTERNAL_TYPES);

            // remember the first block we see for each type
            // (highest index, since we scan backward - this becomes the tail)
            if (rgChainHigh[uType] == BLOCK_INVALID)
                rgChainHigh[uType] = uBlock;

            // link this block to the last one we saw of this type
            pSegment->rgAllocation[uBlock] = rgChainCurr[uType];

            // remember this block in type chain
            rgChainCurr[uType] = (uint8_t)uBlock;
        }
        else
        {
            // block is free - is it also contiguous with the free list?
            // (i.e. part of the unbroken free run at the end of the segment)
            if (fContiguousWithFreeList)
                uEmptyLine = uBlock;

            // link this block to the last one in the free chain
            pSegment->rgAllocation[uBlock] = bChainFree;

            // add this block to the free list
            bChainFree = (uint8_t)uBlock;
        }
    }

    // now close the loops and store the tails
    for (uType = 0; uType < HANDLE_MAX_INTERNAL_TYPES; uType++)
    {
        // get the first block in the list
        uint8_t bBlock = rgChainCurr[uType];

        // if there is a list then make it circular and save it
        if (bBlock != BLOCK_INVALID)
        {
            // highest block we saw becomes tail
            uint32_t uTail = rgChainHigh[uType];

            // store tail in segment
            pSegment->rgTail[uType] = (uint8_t)uTail;

            // link tail to head
            pSegment->rgAllocation[uTail] = bBlock;

            // If we scavenged blocks above then we might have left the hint pointing at the free chain. Reset
            // it back into this chain if so (the choice of block is arbitrary, this case is very rare).
            if (pSegment->rgBlockType[pSegment->rgHint[uType]] != uType)
                pSegment->rgHint[uType] = bBlock;
        }
        else
        {
            // No blocks of this type were found in the rgBlockType array, meaning either there were no
            // such blocks on entry to this function (in which case the associated tail is guaranteed
            // to already be marked invalid) OR that there were blocks but all of them were reclaimed
            // by the scavenging logic above (in which case the associated tail is guaranteed to point
            // to one of the scavenged blocks). In the latter case, the tail is currently "stale"
            // and therefore needs to be manually updated.
            if (pSegment->rgTail[uType] != BLOCK_INVALID)
            {
                _ASSERTE(fScavengingOccurred);
                pSegment->rgTail[uType] = BLOCK_INVALID;
                pSegment->rgHint[uType] = BLOCK_INVALID;
            }
        }
    }

    // store the new free list head
    pSegment->bFreeList = bChainFree;

    // compute the new empty line
    // (uEmptyLine stays at BLOCK_INVALID when no trailing free run exists, so clamp it)
    if (uEmptyLine > HANDLE_BLOCKS_PER_SEGMENT)
        uEmptyLine = HANDLE_BLOCKS_PER_SEGMENT;

    // store the updated empty line
    pSegment->bEmptyLine = (uint8_t)uEmptyLine;
}
1756
1757/*
1758 * DoesSegmentNeedsToTrimExcessPages
1759 *
1760 * Checks to see if any pages can be decommitted from the segment
1761 *
1762 */
1763BOOL DoesSegmentNeedsToTrimExcessPages(TableSegment *pSegment)
1764{
1765 WRAPPER_NO_CONTRACT;
1766
1767 // fetch the empty and decommit lines
1768 uint32_t uEmptyLine = pSegment->bEmptyLine;
1769 uint32_t uDecommitLine = pSegment->bDecommitLine;
1770
1771 // check to see if we can decommit some handles
1772 // NOTE: we use '<' here to avoid playing ping-pong on page boundaries
1773 // this is OK since the zero case is handled elsewhere (segment gets freed)
1774 if (uEmptyLine < uDecommitLine)
1775 {
1776 // derive some useful info about the page size
1777 uintptr_t dwPageRound = (uintptr_t)OS_PAGE_SIZE - 1;
1778 uintptr_t dwPageMask = ~dwPageRound;
1779
1780 // compute the address corresponding to the empty line
1781 uintptr_t dwLo = (uintptr_t)pSegment->rgValue + (uEmptyLine * HANDLE_BYTES_PER_BLOCK);
1782
1783 // adjust the empty line address to the start of the nearest whole empty page
1784 dwLo = (dwLo + dwPageRound) & dwPageMask;
1785
1786 // compute the address corresponding to the old commit line
1787 uintptr_t dwHi = (uintptr_t)pSegment->rgValue + ((uint32_t)pSegment->bCommitLine * HANDLE_BYTES_PER_BLOCK);
1788
1789 // is there anything to decommit?
1790 if (dwHi > dwLo)
1791 {
1792 return TRUE;
1793 }
1794 }
1795
1796 return FALSE;
1797}
1798
1799
1800/*
1801 * SegmentTrimExcessPages
1802 *
1803 * Checks to see if any pages can be decommitted from the segment.
1804 * In case there any unused pages it goes and decommits them.
1805 *
1806 */
void SegmentTrimExcessPages(TableSegment *pSegment)
{
    WRAPPER_NO_CONTRACT;

    // Decommits whole pages between the (page-rounded) empty line and the
    // commit line, then lowers bCommitLine and bDecommitLine accordingly.
    // Mirrors the computation in DoesSegmentNeedsToTrimExcessPages.

    // fetch the empty and decommit lines
    uint32_t uEmptyLine = pSegment->bEmptyLine;
    uint32_t uDecommitLine = pSegment->bDecommitLine;

    // check to see if we can decommit some handles
    // NOTE: we use '<' here to avoid playing ping-pong on page boundaries
    //  this is OK since the zero case is handled elsewhere (segment gets freed)
    if (uEmptyLine < uDecommitLine)
    {
        // derive some useful info about the page size
        uintptr_t dwPageRound = (uintptr_t)OS_PAGE_SIZE - 1;
        uintptr_t dwPageMask = ~dwPageRound;

        // compute the address corresponding to the empty line
        uintptr_t dwLo = (uintptr_t)pSegment->rgValue + (uEmptyLine * HANDLE_BYTES_PER_BLOCK);

        // adjust the empty line address to the start of the nearest whole empty page
        dwLo = (dwLo + dwPageRound) & dwPageMask;

        // compute the address corresponding to the old commit line
        uintptr_t dwHi = (uintptr_t)pSegment->rgValue + ((uint32_t)pSegment->bCommitLine * HANDLE_BYTES_PER_BLOCK);

        // is there anything to decommit?
        if (dwHi > dwLo)
        {
            // decommit the memory
            GCToOSInterface::VirtualDecommit((void *)dwLo, dwHi - dwLo);

            // update the commit line to the start of the decommitted range
            pSegment->bCommitLine = (uint8_t)((dwLo - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK);

            // compute the address for the new decommit line (one page below the new commit line)
            size_t dwDecommitAddr = dwLo - OS_PAGE_SIZE;

            // assume a decommit line of zero until we know otherwise
            uDecommitLine = 0;

            // if the address is within the handle area then compute the line from the address
            if (dwDecommitAddr > (size_t)pSegment->rgValue)
                uDecommitLine = (uint32_t)((dwDecommitAddr - (size_t)pSegment->rgValue) / HANDLE_BYTES_PER_BLOCK);

            // update the decommit line
            pSegment->bDecommitLine = (uint8_t)uDecommitLine;
        }
    }
}
1857
1858
1859/*
1860 * BlockAllocHandlesInMask
1861 *
 * Attempts to allocate the requested number of handles of the specified type,
1863 * from the specified mask of the specified handle block.
1864 *
1865 * Returns the number of available handles actually allocated.
1866 *
1867 */
uint32_t BlockAllocHandlesInMask(TableSegment *pSegment, uint32_t uBlock,
                                 uint32_t *pdwMask, uint32_t uHandleMaskDisplacement,
                                 OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
    LIMITED_METHOD_CONTRACT;
    UNREFERENCED_PARAMETER(uBlock);

    // Allocates up to uCount handles out of the 32-slot free mask at *pdwMask.
    // A set bit in the mask marks a free slot; allocated bits are cleared.
    // uHandleMaskDisplacement is the segment-relative index of this mask's
    // first handle.  Returns how many handles were actually written to
    // pHandleBase.

    // keep track of how many handles we have left to allocate
    uint32_t uRemain = uCount;

    // fetch the free mask into a local so we can play with it
    uint32_t dwFree = *pdwMask;

    // keep track of our displacement within the mask
    uint32_t uByteDisplacement = 0;

    // examine the mask byte by byte for free handles
    do
    {
        // grab the low byte of the mask
        uint32_t dwLowByte = (dwFree & MASK_LOBYTE);

        // are there any free handles here?
        if (dwLowByte)
        {
            // remember which handles we've taken
            uint32_t dwAlloc = 0;

            // loop until we've allocated all the handles we can from here
            do
            {
                // get the index of the next handle
                // (c_rgLowBitIndex maps a byte value to its lowest set bit)
                uint32_t uIndex = c_rgLowBitIndex[dwLowByte];

                // compute the mask for the handle we chose
                dwAlloc |= (1 << uIndex);

                // remove this handle from the mask byte
                dwLowByte &= ~dwAlloc;

                // compute the index of this handle in the segment
                uIndex += uHandleMaskDisplacement + uByteDisplacement;

                // store the allocated handle in the handle array
                *pHandleBase = (OBJECTHANDLE)(pSegment->rgValue + uIndex);

                // adjust our count and array pointer
                uRemain--;
                pHandleBase++;

            } while (dwLowByte && uRemain);

            // shift the allocation mask into position
            dwAlloc <<= uByteDisplacement;

            // update the mask to account for the handles we allocated
            // (clearing the bits marks those slots as in use)
            *pdwMask &= ~dwAlloc;
        }

        // on to the next byte in the mask
        dwFree >>= BITS_PER_BYTE;
        uByteDisplacement += BITS_PER_BYTE;

    } while (uRemain && dwFree);

    // return the number of handles we got
    return (uCount - uRemain);

}
1937
1938
1939/*
1940 * BlockAllocHandlesInitial
1941 *
1942 * Allocates a specified number of handles from a newly committed (empty) block.
1943 *
1944 */
uint32_t BlockAllocHandlesInitial(TableSegment *pSegment, uint32_t uType, uint32_t uBlock,
                                  OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
    LIMITED_METHOD_CONTRACT;
    UNREFERENCED_PARAMETER(uType);

    // Allocates uCount handles from a brand-new (fully free) block by marking
    // the first uCount slots as in-use and writing the corresponding handle
    // pointers to pHandleBase.  uCount is clamped to one block's worth.
    // Returns the number of handles allocated.

    // sanity check
    _ASSERTE(uCount);

    // validate the number of handles we were asked to allocate
    if (uCount > HANDLE_HANDLES_PER_BLOCK)
    {
        _ASSERTE(FALSE);
        uCount = HANDLE_HANDLES_PER_BLOCK;
    }

    // keep track of how many handles we have left to mark in masks
    uint32_t uRemain = uCount;

    // get the first mask for this block
    uint32_t *pdwMask = pSegment->rgFreeMask + (uBlock * HANDLE_MASKS_PER_BLOCK);

    // loop through the masks, zeroing the appropriate free bits
    do
    {
        // this is a brand new block - all masks we encounter should be totally free
        _ASSERTE(*pdwMask == MASK_EMPTY);

        // pick an initial guess at the number to allocate
        uint32_t uAlloc = uRemain;

        // compute the default mask based on that count
        uint32_t dwNewMask;
        // are we allocating all of them?
        if (uAlloc >= HANDLE_HANDLES_PER_MASK)
        {
            dwNewMask = MASK_FULL; // avoid unpredictable shift
            uAlloc = HANDLE_HANDLES_PER_MASK;
        }
        else
        {
            // clear the low uAlloc bits (allocated), keep the rest set (free)
            dwNewMask = (MASK_EMPTY << uAlloc);
        }

        // set the free mask
        *pdwMask = dwNewMask;

        // update our count and mask pointer
        uRemain -= uAlloc;
        pdwMask++;

    } while (uRemain);

    // compute the bounds for allocation so we can copy the handles
    _UNCHECKED_OBJECTREF *pValue = pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK);
    _UNCHECKED_OBJECTREF *pLast = pValue + uCount;

    // loop through filling in the output array with handles
    do
    {
        // store the next handle in the next array slot
        *pHandleBase = (OBJECTHANDLE)pValue;

        // increment our source and destination
        pValue++;
        pHandleBase++;

    } while (pValue < pLast);

    // return the number of handles we allocated
    return uCount;
}
2017
2018
2019/*
2020 * BlockAllocHandles
2021 *
 * Attempts to allocate the requested number of handles of the specified type,
2023 * from the specified handle block.
2024 *
2025 * Returns the number of available handles actually allocated.
2026 *
2027 */
uint32_t BlockAllocHandles(TableSegment *pSegment, uint32_t uBlock, OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
    WRAPPER_NO_CONTRACT;

    /*
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    */

    // Allocates up to uCount handles from block uBlock by walking its free
    // masks and delegating to BlockAllocHandlesInMask for each non-empty mask.
    // Returns the number of handles actually written to pHandleBase.

    // keep track of how many handles we have left to allocate
    uint32_t uRemain = uCount;

    // set up our loop and limit mask pointers
    uint32_t *pdwMask = pSegment->rgFreeMask + (uBlock * HANDLE_MASKS_PER_BLOCK);
    uint32_t *pdwMaskLast = pdwMask + HANDLE_MASKS_PER_BLOCK;

    // keep track of the handle displacement for the mask we're scanning
    uint32_t uDisplacement = uBlock * HANDLE_HANDLES_PER_BLOCK;

    // loop through all the masks, allocating handles as we go
    do
    {
        // if this mask indicates free handles then grab them
        // (any set bit means at least one free slot)
        if (*pdwMask)
        {
            // allocate as many handles as we need from this mask
            uint32_t uSatisfied = BlockAllocHandlesInMask(pSegment, uBlock, pdwMask, uDisplacement, pHandleBase, uRemain);

            // adjust our count and array pointer
            uRemain -= uSatisfied;
            pHandleBase += uSatisfied;

            // if there are no remaining slots to be filled then we are done
            if (!uRemain)
                break;
        }

        // on to the next mask
        pdwMask++;
        uDisplacement += HANDLE_HANDLES_PER_MASK;

    } while (pdwMask < pdwMaskLast);

    // return the number of handles we got
    return (uCount - uRemain);
}
2075
2076
2077/*
2078 * SegmentAllocHandlesFromTypeChain
2079 *
 * Attempts to allocate the requested number of handles of the specified type,
2081 * from the specified segment's block chain for the specified type. This routine
2082 * ONLY scavenges existing blocks in the type chain. No new blocks are committed.
2083 *
2084 * Returns the number of available handles actually allocated.
2085 *
2086 */
uint32_t SegmentAllocHandlesFromTypeChain(TableSegment *pSegment, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
    WRAPPER_NO_CONTRACT;

    /*
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    */

    // Satisfies as much of the request as possible from free slots in blocks
    // already committed to uType's chain, starting at the chain's hint block.
    // No new blocks are committed here.  Returns the number of handles
    // allocated (bounded by the chain's free count).

    // fetch the number of handles available in this chain
    uint32_t uAvail = pSegment->rgFreeCount[uType];

    // is the available count greater than the requested count?
    if (uAvail > uCount)
    {
        // yes - all requested handles are available
        uAvail = uCount;
    }
    else
    {
        // no - we can only satisfy some of the request
        uCount = uAvail;
    }

    // did we find that any handles are available?
    if (uAvail)
    {
        // yes - fetch the head of the block chain and set up a loop limit
        // (the hint is where free handles were last known to be)
        uint32_t uBlock = pSegment->rgHint[uType];
        uint32_t uLast = uBlock;

        // loop until we have found all handles known to be available
        for (;;)
        {
            // try to allocate handles from the current block
            uint32_t uSatisfied = BlockAllocHandles(pSegment, uBlock, pHandleBase, uAvail);

            // did we get everything we needed?
            if (uSatisfied == uAvail)
            {
                // yes - update the hint for this type chain and get out
                pSegment->rgHint[uType] = (uint8_t)uBlock;
                break;
            }

            // adjust our count and array pointer
            uAvail -= uSatisfied;
            pHandleBase += uSatisfied;

            // fetch the next block in the type chain (chains are circular)
            uBlock = pSegment->rgAllocation[uBlock];

            // are we out of blocks?
            // (wrapping back to the start means rgFreeCount promised more than exists)
            if (uBlock == uLast)
            {
                // free count is corrupt
                _ASSERTE(FALSE);

                // avoid making the problem any worse
                uCount -= uAvail;
                break;
            }
        }

        // update the free count
        pSegment->rgFreeCount[uType] -= uCount;
    }

    // return the number of handles we got
    return uCount;
}
2159
2160
2161/*
2162 * SegmentAllocHandlesFromFreeList
2163 *
 * Attempts to allocate the requested number of handles of the specified type,
2165 * by committing blocks from the free list to that type's type chain.
2166 *
2167 * Returns the number of available handles actually allocated.
2168 *
2169 */
2170uint32_t SegmentAllocHandlesFromFreeList(TableSegment *pSegment, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount)
2171{
2172 LIMITED_METHOD_CONTRACT;
2173
2174 /*
2175 NOTHROW;
2176 GC_NOTRIGGER;
2177 MODE_ANY;
2178 */
2179
2180 // keep track of how many handles we have left to allocate
2181 uint32_t uRemain = uCount;
2182
2183 // loop allocating handles until we are done or we run out of free blocks
2184 do
2185 {
2186 // start off assuming we can allocate all the handles
2187 uint32_t uAlloc = uRemain;
2188
2189 // we can only get a block-full of handles at a time
2190 if (uAlloc > HANDLE_HANDLES_PER_BLOCK)
2191 uAlloc = HANDLE_HANDLES_PER_BLOCK;
2192
2193 // try to get a block from the free list
2194 uint32_t uBlock = SegmentInsertBlockFromFreeList(pSegment, uType, (uRemain == uCount));
2195
2196 // if there are no free blocks left then we are done
2197 if (uBlock == BLOCK_INVALID)
2198 break;
2199
2200 // initialize the block by allocating the required handles into the array
2201 uAlloc = BlockAllocHandlesInitial(pSegment, uType, uBlock, pHandleBase, uAlloc);
2202
2203 // adjust our count and array pointer
2204 uRemain -= uAlloc;
2205 pHandleBase += uAlloc;
2206
2207 } while (uRemain);
2208
2209 // compute the number of handles we took
2210 uCount -= uRemain;
2211
2212 // update the free count by the number of handles we took
2213 pSegment->rgFreeCount[uType] -= uCount;
2214
2215 // return the number of handles we got
2216 return uCount;
2217}
2218
2219
2220/*
2221 * SegmentAllocHandles
2222 *
 * Attempts to allocate the requested number of handles of the specified type,
2224 * from the specified segment.
2225 *
2226 * Returns the number of available handles actually allocated.
2227 *
2228 */
2229uint32_t SegmentAllocHandles(TableSegment *pSegment, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount)
2230{
2231 LIMITED_METHOD_CONTRACT;
2232
2233 /*
2234 NOTHROW;
2235 GC_NOTRIGGER;
2236 MODE_ANY;
2237 */
2238
2239 // first try to get some handles from the existing type chain
2240 uint32_t uSatisfied = SegmentAllocHandlesFromTypeChain(pSegment, uType, pHandleBase, uCount);
2241
2242 // if there are still slots to be filled then we need to commit more blocks to the type chain
2243 if (uSatisfied < uCount)
2244 {
2245 // adjust our count and array pointer
2246 uCount -= uSatisfied;
2247 pHandleBase += uSatisfied;
2248
2249 // get remaining handles by committing blocks from the free list
2250 uSatisfied += SegmentAllocHandlesFromFreeList(pSegment, uType, pHandleBase, uCount);
2251 }
2252
2253 // return the number of handles we got
2254 return uSatisfied;
2255}
2256
2257
2258/*
2259 * TableAllocBulkHandles
2260 *
 * Attempts to allocate the requested number of handles of the specified type.
2262 *
2263 * Returns the number of handles that were actually allocated. This is always
2264 * the same as the number of handles requested except in out-of-memory conditions,
2265 * in which case it is the number of handles that were successfully allocated.
2266 *
2267 */
uint32_t TableAllocBulkHandles(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount)
{
    WRAPPER_NO_CONTRACT;

    /*
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    */

    // keep track of how many handles we have left to allocate
    uint32_t uRemain = uCount;

    // start with the first segment and loop until we are done
    TableSegment *pSegment = pTable->pSegmentList;

    // bLastSequence remembers the sequence number of the last segment we saw;
    // fNewSegment is set once the existing chain is exhausted, after which
    // every further segment must be freshly allocated
    uint8_t bLastSequence = 0;
    BOOL fNewSegment = FALSE;

    for (;;)
    {
        // get some handles from the current segment
        uint32_t uSatisfied = SegmentAllocHandles(pSegment, uType, pHandleBase, uRemain);

        // adjust our count and array pointer
        uRemain -= uSatisfied;
        pHandleBase += uSatisfied;

        // if there are no remaining slots to be filled then we are done
        if (!uRemain)
            break;

        // fetch the next segment in the chain.
        TableSegment *pNextSegment = NULL;

        if (!fNewSegment)
        {
            pNextSegment = pSegment->pNextSegment;
            if (!pNextSegment)
            {
                // we just drained the last existing segment - remember its
                // sequence number so new segments continue the numbering
                bLastSequence = pSegment->bSequence;
                fNewSegment = TRUE;
            }
        }

        // if there are no more segments then allocate another
        if (fNewSegment)
        {
            // ok if this fails then we're out of luck
            pNextSegment = SegmentAlloc(pTable);
            if (!pNextSegment)
            {
                // we ran out of memory allocating a new segment.
                // this may not be catastrophic - if there are still some
                // handles in the cache then some allocations may succeed.
                break;
            }

            // set up the correct sequence number for the new segment
            // (sequence numbers are single bytes and wrap around at 0x100)
            pNextSegment->bSequence = (uint8_t)(((uint32_t)bLastSequence + 1) % 0x100);
            bLastSequence = pNextSegment->bSequence;

            // link the new segment into the list by the order of segment address
            TableSegment* pWalk = pTable->pSegmentList;
            if ((uintptr_t)pNextSegment < (uintptr_t)pWalk)
            {
                // new segment has the lowest address - it becomes the new list head
                pNextSegment->pNextSegment = pWalk;
                pTable->pSegmentList = pNextSegment;
            }
            else
            {
                // walk the address-sorted list to find the insertion point
                while (pWalk)
                {
                    if (pWalk->pNextSegment == NULL)
                    {
                        // reached the tail - append the new segment there
                        pWalk->pNextSegment = pNextSegment;
                        break;
                    }
                    else if ((uintptr_t)pWalk->pNextSegment > (uintptr_t)pNextSegment)
                    {
                        // insert before the first segment with a higher address
                        pNextSegment->pNextSegment = pWalk->pNextSegment;
                        pWalk->pNextSegment = pNextSegment;
                        break;
                    }
                    pWalk = pWalk->pNextSegment;
                }
            }
        }

        // try again with new segment
        pSegment = pNextSegment;
    }

    // compute the number of handles we actually got
    uint32_t uAllocated = (uCount - uRemain);

    // update the count of handles marked as "used"
    pTable->dwCount += uAllocated;

    // return the number of handles we actually got
    return uAllocated;
}
2370
2371
2372/*
2373 * BlockFreeHandlesInMask
2374 *
2375 * Frees some portion of an array of handles of the specified type.
2376 * The array is scanned forward and handles are freed until a handle
2377 * from a different mask is encountered.
2378 *
2379 * Returns the number of handles that were freed from the front of the array.
2380 *
2381 */
uint32_t BlockFreeHandlesInMask(TableSegment *pSegment, uint32_t uBlock, uint32_t uMask, OBJECTHANDLE *pHandleBase, uint32_t uCount,
                                uintptr_t *pUserData, uint32_t *puActualFreed, BOOL *pfAllMasksFree)
{
    LIMITED_METHOD_CONTRACT;

    // keep track of how many handles we have left to free
    uint32_t uRemain = uCount;

#ifdef _PREFAST_
#pragma warning(push)
#pragma warning(disable:6305) // "This code deals with a bit vector mapped piece of code, so there is no mismatch between sizeof and countof"
#endif

    // if this block has user data, convert the pointer to be mask-relative
    if (pUserData)
        pUserData += (uMask * HANDLE_HANDLES_PER_MASK);

    // convert our mask index to be segment-relative
    uMask += (uBlock * HANDLE_MASKS_PER_BLOCK);

    // compute the handle bounds for our mask
    OBJECTHANDLE firstHandle = (OBJECTHANDLE)(pSegment->rgValue + (uMask * HANDLE_HANDLES_PER_MASK));
    OBJECTHANDLE lastHandle = (OBJECTHANDLE)((_UNCHECKED_OBJECTREF *)firstHandle + HANDLE_HANDLES_PER_MASK);

#ifdef _PREFAST_
#pragma warning(pop)
#endif

    // keep a local copy of the free mask to update as we free handles
    // (written back to the segment only once, after the loop)
    uint32_t dwFreeMask = pSegment->rgFreeMask[uMask];

    // keep track of how many bogus frees we are asked to do
    // (handles whose free bit was already set, i.e. double-frees)
    uint32_t uBogus = 0;

    // loop freeing handles until we encounter one outside our block or there are none left
    do
    {
        // fetch the next handle in the array
        OBJECTHANDLE handle = *pHandleBase;

        // if the handle is outside our segment then we are done
        if ((handle < firstHandle) || (handle >= lastHandle))
            break;

        // sanity check - the handle should no longer refer to an object here
        _ASSERTE(HndIsNullOrDestroyedHandle(*(_UNCHECKED_OBJECTREF *)handle));

        // compute the handle index within the mask
        uint32_t uHandle = (uint32_t)(handle - firstHandle);

        // if there is user data then clear the user data for this handle
        if (pUserData)
            pUserData[uHandle] = 0L;

        // compute the mask bit for this handle
        uint32_t dwFreeBit = (1 << uHandle);

        // the handle should not already be free
        if ((dwFreeMask & dwFreeBit) != 0)
        {
            // SOMEONE'S FREEING A HANDLE THAT ISN'T ALLOCATED
            uBogus++;
            _ASSERTE(FALSE);
        }

        // add this handle to the tally of freed handles
        dwFreeMask |= dwFreeBit;

        // adjust our count and array pointer
        uRemain--;
        pHandleBase++;

    } while (uRemain);

    // update the mask to reflect the handles we changed
    pSegment->rgFreeMask[uMask] = dwFreeMask;

    // if not all handles in this mask are free then tell our caller not to check the block
    // (pfAllMasksFree is only ever cleared here, never set, so it accumulates across masks)
    if (dwFreeMask != MASK_EMPTY)
        *pfAllMasksFree = FALSE;

    // compute the number of handles we processed from the array
    uint32_t uFreed = (uCount - uRemain);

    // tell the caller how many handles we actually freed (excluding bogus double-frees)
    *puActualFreed += (uFreed - uBogus);

    // return the number of handles we actually freed
    return uFreed;
}
2472
2473
2474/*
2475 * BlockFreeHandles
2476 *
2477 * Frees some portion of an array of handles of the specified type.
2478 * The array is scanned forward and handles are freed until a handle
2479 * from a different block is encountered.
2480 *
2481 * Returns the number of handles that were freed from the front of the array.
2482 *
2483 */
2484uint32_t BlockFreeHandles(TableSegment *pSegment, uint32_t uBlock, OBJECTHANDLE *pHandleBase, uint32_t uCount,
2485 uint32_t *puActualFreed, BOOL *pfScanForFreeBlocks)
2486{
2487 WRAPPER_NO_CONTRACT;
2488
2489 /*
2490 NOTHROW;
2491 GC_NOTRIGGER;
2492 MODE_ANY;
2493 */
2494
2495 // keep track of how many handles we have left to free
2496 uint32_t uRemain = uCount;
2497
2498 // fetch the user data for this block, if any
2499 uintptr_t *pBlockUserData = BlockFetchUserDataPointer(pSegment, uBlock, FALSE);
2500
2501 // compute the handle bounds for our block
2502 OBJECTHANDLE firstHandle = (OBJECTHANDLE)(pSegment->rgValue + (uBlock * HANDLE_HANDLES_PER_BLOCK));
2503 OBJECTHANDLE lastHandle = (OBJECTHANDLE)((_UNCHECKED_OBJECTREF *)firstHandle + HANDLE_HANDLES_PER_BLOCK);
2504
2505 // this variable will only stay TRUE if all masks we touch end up in the free state
2506 BOOL fAllMasksWeTouchedAreFree = TRUE;
2507
2508 // loop freeing handles until we encounter one outside our block or there are none left
2509 do
2510 {
2511 // fetch the next handle in the array
2512 OBJECTHANDLE handle = *pHandleBase;
2513
2514 // if the handle is outside our segment then we are done
2515 if ((handle < firstHandle) || (handle >= lastHandle))
2516 break;
2517
2518 // compute the mask that this handle resides in
2519 uint32_t uMask = (uint32_t)((handle - firstHandle) / HANDLE_HANDLES_PER_MASK);
2520
2521 // free as many handles as this mask owns from the front of the array
2522 uint32_t uFreed = BlockFreeHandlesInMask(pSegment, uBlock, uMask, pHandleBase, uRemain,
2523 pBlockUserData, puActualFreed, &fAllMasksWeTouchedAreFree);
2524
2525 // adjust our count and array pointer
2526 uRemain -= uFreed;
2527 pHandleBase += uFreed;
2528
2529 } while (uRemain);
2530
2531 // are all masks we touched free?
2532 if (fAllMasksWeTouchedAreFree)
2533 {
2534 // is the block unlocked?
2535 // NOTE: This check is incorrect and defeats the intended purpose of scavenging. If the
2536 // current block is locked and has just been emptied, then it cannot be removed right now
2537 // and therefore will nominally need to be scavenged. The only code that triggers
2538 // scavenging is in SegmentRemoveFreeBlocks, and setting the flag is the only way to
2539 // trigger a call into SegmentRemoveFreeBlocks call. As a result, by NOT setting the flag
2540 // this code is generally PREVENTING scavenging in exactly the cases where scavenging is
2541 // needed. The code is not being changed because it has always been this way and scavenging
2542 // itself generally has extremely low value.
2543 if (!BlockIsLocked(pSegment, uBlock))
2544 {
2545 // tell the caller it might be a good idea to scan for free blocks
2546 *pfScanForFreeBlocks = TRUE;
2547 }
2548 }
2549
2550 // return the number of handles we actually freed
2551 return (uCount - uRemain);
2552}
2553
2554
2555/*
2556 * SegmentFreeHandles
2557 *
2558 * Frees some portion of an array of handles of the specified type.
2559 * The array is scanned forward and handles are freed until a handle
2560 * from a different segment is encountered.
2561 *
2562 * Returns the number of handles that were freed from the front of the array.
2563 *
2564 */
2565uint32_t SegmentFreeHandles(TableSegment *pSegment, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount)
2566{
2567 WRAPPER_NO_CONTRACT;
2568
2569 /*
2570 NOTHROW;
2571 GC_NOTRIGGER;
2572 MODE_ANY;
2573 */
2574
2575 // keep track of how many handles we have left to free
2576 uint32_t uRemain = uCount;
2577
2578 // compute the handle bounds for our segment
2579 OBJECTHANDLE firstHandle = (OBJECTHANDLE)pSegment->rgValue;
2580 OBJECTHANDLE lastHandle = (OBJECTHANDLE)((_UNCHECKED_OBJECTREF *)firstHandle + HANDLE_HANDLES_PER_SEGMENT);
2581
2582 // the per-block free routines will set this if there is a chance some blocks went free
2583 BOOL fScanForFreeBlocks = FALSE;
2584
2585 // track the number of handles we actually free
2586 uint32_t uActualFreed = 0;
2587
2588 // loop freeing handles until we encounter one outside our segment or there are none left
2589 do
2590 {
2591 // fetch the next handle in the array
2592 OBJECTHANDLE handle = *pHandleBase;
2593
2594 // if the handle is outside our segment then we are done
2595 if ((handle < firstHandle) || (handle >= lastHandle))
2596 break;
2597
2598 // compute the block that this handle resides in
2599 uint32_t uBlock = (uint32_t)(((uintptr_t)handle - (uintptr_t)firstHandle) / (HANDLE_SIZE * HANDLE_HANDLES_PER_BLOCK));
2600
2601 // sanity check that this block is the type we expect to be freeing
2602 _ASSERTE(pSegment->rgBlockType[uBlock] == uType);
2603
2604 // free as many handles as this block owns from the front of the array
2605 uint32_t uFreed = BlockFreeHandles(pSegment, uBlock, pHandleBase, uRemain, &uActualFreed, &fScanForFreeBlocks);
2606
2607 // adjust our count and array pointer
2608 uRemain -= uFreed;
2609 pHandleBase += uFreed;
2610
2611 } while (uRemain);
2612
2613 // compute the number of handles we actually freed
2614 uint32_t uFreed = (uCount - uRemain);
2615
2616 // update the free count
2617 pSegment->rgFreeCount[uType] += uActualFreed;
2618
2619 // if we saw blocks that may have gone totally free then do a free scan
2620 if (fScanForFreeBlocks)
2621 {
2622 // assume we no scavenging is required
2623 BOOL fNeedsScavenging = FALSE;
2624
2625 // try to remove any free blocks we may have created
2626 SegmentRemoveFreeBlocks(pSegment, uType, &fNeedsScavenging);
2627
2628 // did SegmentRemoveFreeBlocks have to skip over any free blocks?
2629 if (fNeedsScavenging)
2630 {
2631 // yup, arrange to scavenge them later
2632 pSegment->fResortChains = TRUE;
2633 pSegment->fNeedsScavenging = TRUE;
2634 }
2635 }
2636
2637 // return the total number of handles we freed
2638 return uFreed;
2639}
2640
2641
2642/*
2643 * TableFreeBulkPreparedHandles
2644 *
2645 * Frees an array of handles of the specified type.
2646 *
2647 * This routine is optimized for a sorted array of handles but will accept any order.
2648 *
2649 */
2650void TableFreeBulkPreparedHandles(HandleTable *pTable, uint32_t uType, OBJECTHANDLE *pHandleBase, uint32_t uCount)
2651{
2652 //Update the count of handles marked as "used"
2653 pTable->dwCount -= uCount;
2654
2655 WRAPPER_NO_CONTRACT;
2656
2657 /*
2658 NOTHROW;
2659 GC_NOTRIGGER;
2660 MODE_ANY;
2661 */
2662
2663 // loop until all handles are freed
2664 do
2665 {
2666 // get the segment for the first handle
2667 TableSegment * pSegment = (TableSegment *)HandleFetchSegmentPointer(*pHandleBase);
2668
2669 // sanity
2670 _ASSERTE(pSegment->pHandleTable == pTable);
2671
2672 // free as many handles as this segment owns from the front of the array
2673 uint32_t uFreed = SegmentFreeHandles(pSegment, uType, pHandleBase, uCount);
2674
2675 // adjust our count and array pointer
2676 uCount -= uFreed;
2677 pHandleBase += uFreed;
2678
2679 } while (uCount);
2680}
2681
2682
2683/*
2684 * TableFreeBulkUnpreparedHandlesWorker
2685 *
2686 * Frees an array of handles of the specified type by preparing them and calling TableFreeBulkPreparedHandles.
2687 * Uses the supplied scratch buffer to prepare the handles.
2688 *
2689 */
2690void TableFreeBulkUnpreparedHandlesWorker(HandleTable *pTable, uint32_t uType, const OBJECTHANDLE *pHandles, uint32_t uCount,
2691 OBJECTHANDLE *pScratchBuffer)
2692{
2693 WRAPPER_NO_CONTRACT;
2694
2695 // copy the handles into the destination buffer
2696 memcpy(pScratchBuffer, pHandles, uCount * sizeof(OBJECTHANDLE));
2697
2698 // sort them for optimal free order
2699 QuickSort((uintptr_t *)pScratchBuffer, 0, uCount - 1, CompareHandlesByFreeOrder);
2700
2701 // make sure the handles are zeroed too
2702 ZeroHandles(pScratchBuffer, uCount);
2703
2704 // prepare and free these handles
2705 TableFreeBulkPreparedHandles(pTable, uType, pScratchBuffer, uCount);
2706}
2707
2708
2709/*
2710 * TableFreeBulkUnpreparedHandles
2711 *
2712 * Frees an array of handles of the specified type by preparing them and calling
 * TableFreeBulkUnpreparedHandlesWorker one or more times.
2714 *
2715 */
2716void TableFreeBulkUnpreparedHandles(HandleTable *pTable, uint32_t uType, const OBJECTHANDLE *pHandles, uint32_t uCount)
2717{
2718 CONTRACTL
2719 {
2720 NOTHROW;
2721 WRAPPER(GC_TRIGGERS);
2722 }
2723 CONTRACTL_END;
2724
2725 // preparation / free buffer
2726 OBJECTHANDLE rgStackHandles[HANDLE_HANDLES_PER_BLOCK];
2727 OBJECTHANDLE *pScratchBuffer = rgStackHandles;
2728 OBJECTHANDLE *pLargeScratchBuffer = NULL;
2729 uint32_t uFreeGranularity = _countof(rgStackHandles);
2730
2731 // if there are more handles than we can put on the stack then try to allocate a sorting buffer
2732 if (uCount > uFreeGranularity)
2733 {
2734 // try to allocate a bigger buffer to work in
2735 pLargeScratchBuffer = new (nothrow) OBJECTHANDLE[uCount];
2736
2737 // did we get it?
2738 if (pLargeScratchBuffer)
2739 {
2740 // yes - use this buffer to prepare and free the handles
2741 pScratchBuffer = pLargeScratchBuffer;
2742 uFreeGranularity = uCount;
2743 }
2744 }
2745
2746 // loop freeing handles until we have freed them all
2747 while (uCount)
2748 {
2749 // decide how many we can process in this iteration
2750 if (uFreeGranularity > uCount)
2751 uFreeGranularity = uCount;
2752
2753 // prepare and free these handles
2754 TableFreeBulkUnpreparedHandlesWorker(pTable, uType, pHandles, uFreeGranularity, pScratchBuffer);
2755
2756 // adjust our pointers and move on
2757 uCount -= uFreeGranularity;
2758 pHandles += uFreeGranularity;
2759 }
2760
2761 // if we allocated a sorting buffer then free it now
2762 if (pLargeScratchBuffer)
2763 delete [] pLargeScratchBuffer;
2764}
2765
2766#endif // !DACCESS_COMPILE
2767
2768/*--------------------------------------------------------------------------*/
2769
2770
2771