// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*
 * GCHELPERS.INL
 *
 * GC Allocation and Write Barrier Helpers
 *
 */

#ifndef _GCHELPERS_INL_
#define _GCHELPERS_INL_

//========================================================================
//
// WRITE BARRIER HELPERS
//
//========================================================================

#if defined(_WIN64)
static const int card_byte_shift = 11;
static const int card_bundle_byte_shift = 21;
#else
static const int card_byte_shift = 10;

#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
#error Manually managed card bundles are currently only implemented for AMD64.
#endif
#endif

// With these shifts, each card byte covers 2^card_byte_shift bytes of heap
// (2KB on 64-bit, 1KB on 32-bit); on 64-bit, each card bundle byte covers 2^21 == 2MB.

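// Marks as dirty every card (and, where enabled, card bundle) byte covering
// [start, start + len) after a bulk copy of object references, so that the next
// ephemeral GC rescans those locations for cross-generation pointers.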
FORCEINLINE void InlinedSetCardsAfterBulkCopyHelper(Object **start, size_t len)
{
    // Check whether the writes were even into the heap. If not, there's no card update required.
    // Also, if the size is smaller than a pointer, no write barrier is required.
    _ASSERTE(len >= sizeof(uintptr_t));
    if ((BYTE*)start < g_lowest_address || (BYTE*)start >= g_highest_address)
    {
        return;
    }

    // Don't optimize the generation 0 case if we are checking for write barrier violations,
    // since we need to update the shadow heap even in the generation 0 case.
#if defined (WRITE_BARRIER_CHECK) && !defined (SERVER_GC)
    if (g_pConfig->GetHeapVerifyLevel() & EEConfig::HEAPVERIFY_BARRIERCHECK)
    {
        for (unsigned i = 0; i < len / sizeof(Object*); i++)
        {
            updateGCShadow(&start[i], start[i]);
        }
    }
#endif // WRITE_BARRIER_CHECK && !SERVER_GC

#ifdef FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP
    // When software write watch is in use (e.g. on platforms without hardware
    // write watch), record the dirtied region for the concurrent GC to revisit.
    if (GCHeapUtilities::SoftwareWriteWatchIsEnabled())
    {
        GCHeapUtilities::SoftwareWriteWatchSetDirtyRegion(start, len);
    }
#endif // FEATURE_USE_SOFTWARE_WRITE_WATCH_FOR_GC_HEAP

    size_t startAddress = (size_t)start;
    size_t endAddress = startAddress + len;
    size_t startingClump = startAddress >> card_byte_shift;
    size_t endingClump = (endAddress + (1 << card_byte_shift) - 1) >> card_byte_shift;

    // Calculate the number of clumps to mark (round_up(end) - start).
    size_t clumpCount = endingClump - startingClump;
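
    // For illustration (64-bit, card_byte_shift == 11, i.e. 2KB cards): a 512-byte
    // copy starting at address 0x100007F0 yields
    //   startingClump ==  0x100007F0          >> 11 == 0x20000
    //   endingClump   == (0x100009F0 + 0x7FF) >> 11 == 0x20002
    // so clumpCount == 2, because the range straddles one 2KB card boundary.
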
    // VolatileLoadWithoutBarrier() is used here to prevent the fetch of g_card_table from being
    // reordered with the g_lowest/highest_address check at the beginning of this function.
    uint8_t* card = ((uint8_t*)VolatileLoadWithoutBarrier(&g_card_table)) + startingClump;

    // Fill the cards. To avoid cache line thrashing we check whether the cards have already
    // been set before writing. Plain (non-interlocked) stores are sufficient here: mutator-side
    // card updates only ever set bytes to 0xff, so racing writers are benign.
    do
    {
        if (*card != 0xff)
        {
            *card = 0xff;
        }

        card++;
        clumpCount--;
    }
    while (clumpCount != 0);

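    // Card bundles (where enabled) add a coarser second level above the card table:
    // with card_bundle_byte_shift == 21, each bundle byte summarizes a 2MB span of
    // heap (1024 card bytes), letting the GC skip large clean regions quickly.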
#ifdef FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
    size_t startBundleByte = startAddress >> card_bundle_byte_shift;
    size_t endBundleByte = (endAddress + (1 << card_bundle_byte_shift) - 1) >> card_bundle_byte_shift;
    size_t bundleByteCount = endBundleByte - startBundleByte;

    uint8_t* pBundleByte = ((uint8_t*)VolatileLoadWithoutBarrier(&g_card_bundle_table)) + startBundleByte;

    do
    {
        if (*pBundleByte != 0xFF)
        {
            *pBundleByte = 0xFF;
        }

        pBundleByte++;
        bundleByteCount--;
    }
    while (bundleByteCount != 0);
#endif // FEATURE_MANUALLY_MANAGED_CARD_BUNDLES
}
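
// Illustrative only (hypothetical caller, not part of this header): a bulk copy of
// object references would typically invoke the helper after the raw move, e.g.
//
//     memmove(dst, src, cbLen);                                  // copy the refs
//     InlinedSetCardsAfterBulkCopyHelper((Object**)dst, cbLen);  // dirty the cards
//
// so that the next ephemeral GC rescans the destination for cross-generation pointers.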

#endif // !_GCHELPERS_INL_