| 1 | // Licensed to the .NET Foundation under one or more agreements. |
| 2 | // The .NET Foundation licenses this file to you under the MIT license. |
| 3 | // See the LICENSE file in the project root for more information. |
| 4 | // |
| 5 | |
| 6 | // |
| 7 | // Abstract base class implementation of a hash table suitable for efficient serialization into an ngen image. |
| 8 | // See NgenHash.h for a more detailed description. |
| 9 | // |
| 10 | |
| 11 | // Our implementation embeds entry data supplied by the hash sub-class into a larger entry structure |
| 12 | // containing NgenHash metadata. We often end up returning pointers to the inner entry to sub-class code and |
| 13 | // doing this in a DAC-friendly fashion involves some DAC gymnastics. The following couple of macros factor |
| 14 | // those complexities out. |
| 15 | #define VALUE_FROM_VOLATILE_ENTRY(_ptr) dac_cast<DPTR(VALUE)>(PTR_TO_MEMBER_TADDR(VolatileEntry, (_ptr), m_sValue)) |
| 16 | #define VALUE_FROM_PERSISTED_ENTRY(_ptr) dac_cast<DPTR(VALUE)>(PTR_TO_MEMBER_TADDR(PersistedEntry, (_ptr), m_sValue)) |
| 17 | |
| 18 | // We provide a mechanism for the sub-class to extend per-entry operations via a callback mechanism where the |
| 19 | // sub-class implements methods with a certain name and signature (details in the module header for |
| 20 | // NgenHash.h). We could have used virtual methods, but this adds a needless indirection since all the details |
| 21 | // are known statically. In order to have a base class call a method defined only in a sub-class however we |
| 22 | // need a little pointer trickery. The following macro hides that. |
| 23 | #define DOWNCALL(_method) ((FINAL_CLASS*)this)->_method |
| 24 | |
| 25 | #ifndef DACCESS_COMPILE |
| 26 | |
| 27 | // Base constructor. Call this from your derived constructor to provide the owning module, loader heap and |
| 28 | // initial number of buckets (which must be non-zero). Module must be provided if this hash is to be |
| 29 | // serialized into an ngen image. It is exposed to the derived hash class (many need it) but otherwise is only |
| 30 | // used to locate a loader heap for allocating bucket lists and entries unless an alternative heap is |
| 31 | // provided. Note that the heap provided is not serialized (so you'll allocate from that heap at ngen-time, |
| 32 | // but revert to allocating from the module's heap at runtime). If no Module pointer is supplied (non-ngen'd |
| 33 | // hash table) you must provide a direct heap pointer. |
template <NGEN_HASH_PARAMS>
NgenHashTable<NGEN_HASH_ARGS>::NgenHashTable(Module *pModule, LoaderHeap *pHeap, DWORD cInitialBuckets)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // An invariant in the code is that we always have a non-zero number of warm buckets.
    _ASSERTE(cInitialBuckets > 0);

    // At least one of module or heap must have been specified or we won't know how to allocate entries and
    // buckets.
    _ASSERTE(pModule || pHeap);
    m_pModule.SetValueMaybeNull(pModule);
    m_pHeap = pHeap;

    S_SIZE_T cbBuckets = S_SIZE_T(sizeof(VolatileEntry*)) * S_SIZE_T(cInitialBuckets);

    m_cWarmEntries = 0;
    m_cWarmBuckets = cInitialBuckets;
    // Allocate the warm bucket array up front. AllocMem throws on OOM, which is permitted under the THROWS
    // contract above. GetHeap() resolves to either the explicit heap or the module's heap (see GetHeap).
    m_pWarmBuckets.SetValue((PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets));

    // Note: Memory allocated on loader heap is zero filled
    // memset(m_pWarmBuckets, 0, sizeof(VolatileEntry*) * cInitialBuckets);

#ifdef FEATURE_PREJIT
    // The persisted (hot/cold) sections start out empty; they are only populated when the table is saved
    // into an ngen image (see BaseSave). Also record the requested initial bucket count for later use.
    memset(&m_sHotEntries, 0, sizeof(PersistedEntries));
    memset(&m_sColdEntries, 0, sizeof(PersistedEntries));
    m_cInitialBuckets = cInitialBuckets;
#endif // FEATURE_PREJIT
}
| 69 | |
| 70 | // Allocate an uninitialized entry for the hash table (it's not inserted). The AllocMemTracker is optional and |
| 71 | // may be specified as NULL for untracked allocations. This is split from the hash insertion logic so that |
| 72 | // callers can pre-allocate entries and then perform insertions which cannot fault. |
template <NGEN_HASH_PARAMS>
VALUE *NgenHashTable<NGEN_HASH_ARGS>::BaseAllocateEntry(AllocMemTracker *pamTracker)
{
    CONTRACTL
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // Faults are forbidden in BaseInsertEntry. Make the table writeable now, while faults are still
    // allowed.
    EnsureWritablePages(this);
    EnsureWritablePages(this->GetWarmBuckets(), m_cWarmBuckets * sizeof(PTR_VolatileEntry));

    // Allocate the full VolatileEntry: the NgenHash metadata (chain link, hash) plus the sub-class VALUE
    // embedded within it.
    TaggedMemAllocPtr pMemory = GetHeap()->AllocMem(S_SIZE_T(sizeof(VolatileEntry)));

    VolatileEntry *pEntry;
    if (pamTracker)
        pEntry = (VolatileEntry*)pamTracker->Track(pMemory);
    else
        pEntry = pMemory.cast<VolatileEntry*>();

#ifdef _DEBUG
    // In debug builds try and catch cases where code attempts to use entries not allocated via this method.
    // BaseInsertEntry asserts this sentinel is still present before linking the entry into a chain.
    pEntry->m_pNextEntry = (VolatileEntry*)0x12345678;
#endif

    // Hand back only the sub-class portion of the entry; BaseInsertEntry recovers the enclosing
    // VolatileEntry from this pointer via offsetof.
    return &pEntry->m_sValue;
}
| 103 | |
| 104 | // Determine loader heap to be used for allocation of entries and bucket lists. |
| 105 | template <NGEN_HASH_PARAMS> |
| 106 | LoaderHeap *NgenHashTable<NGEN_HASH_ARGS>::GetHeap() |
| 107 | { |
| 108 | CONTRACTL |
| 109 | { |
| 110 | NOTHROW; |
| 111 | GC_NOTRIGGER; |
| 112 | MODE_ANY; |
| 113 | } |
| 114 | CONTRACTL_END; |
| 115 | |
| 116 | // Explicitly provided heap takes priority. |
| 117 | if (m_pHeap) |
| 118 | return m_pHeap; |
| 119 | |
| 120 | // If not specified then we fall back to the owning module's heap (a module must have been specified in |
| 121 | // this case). |
| 122 | _ASSERTE(!m_pModule.IsNull()); |
| 123 | return GetModule()->GetAssembly()->GetLowFrequencyHeap(); |
| 124 | } |
| 125 | |
// Insert an entry previously allocated via BaseAllocateEntry (you cannot allocate entries in any other
// manner) and associated with the given hash value. The entry should have been initialized prior to
// insertion.
template <NGEN_HASH_PARAMS>
void NgenHashTable<NGEN_HASH_ARGS>::BaseInsertEntry(NgenHashValue iHash, VALUE *pEntry)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // We are always guaranteed at least one warm bucket (which is important here: some hash table sub-classes
    // require entry insertion to be fault free).
    _ASSERTE(m_cWarmBuckets > 0);

    // Recover the volatile entry pointer from the sub-class entry pointer passed to us. In debug builds
    // attempt to validate that this transform is really valid and the caller didn't attempt to allocate the
    // entry via some other means than BaseAllocateEntry() (the debug sentinel written there should still be
    // in the chain-link field).
    PTR_VolatileEntry pVolatileEntry = (PTR_VolatileEntry)((BYTE*)pEntry - offsetof(VolatileEntry, m_sValue));
    _ASSERTE(pVolatileEntry->m_pNextEntry == (VolatileEntry*)0x12345678);

    // Remember the entry hash code.
    pVolatileEntry->m_iHashValue = iHash;

    // Compute which bucket the entry belongs in based on the hash.
    DWORD dwBucket = iHash % m_cWarmBuckets;

    // Prepare to link the new entry at the head of the bucket chain.
    pVolatileEntry->m_pNextEntry = (GetWarmBuckets())[dwBucket];

    // Make sure that all writes to the entry are visible before publishing the entry.
    MemoryBarrier();

    // Publish the entry by pointing the bucket at it. From this point on lock-free readers can see the
    // entry, so it must already be fully initialized (hence the barrier above).
    (GetWarmBuckets())[dwBucket] = pVolatileEntry;

    // NOTE(review): this increment is not atomic, so writers appear to be serialized externally (GrowTable's
    // comments reference readers retrying "under the lock") -- confirm the locking contract in NgenHash.h.
    m_cWarmEntries++;

    // If the insertion pushed the table load over our limit then attempt to grow the bucket list. Note that
    // we ignore any failure (this is a performance operation and is not required for correctness).
    if (m_cWarmEntries > (2 * m_cWarmBuckets))
        GrowTable();
}
| 172 | |
| 173 | // Increase the size of the bucket list in order to reduce the size of bucket chains. Does nothing on failure |
| 174 | // to allocate (since this impacts perf, not correctness). |
template <NGEN_HASH_PARAMS>
void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
{
    CONTRACTL
    {
        INSTANCE_CHECK;
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // If we can't increase the number of buckets, we lose perf but not correctness. So we won't report this
    // error to our caller.
    FAULT_NOT_FATAL();

    // Make the new bucket table larger by the scale factor requested by the subclass (but also prime).
    DWORD cNewBuckets = NextLargestPrime(m_cWarmBuckets * SCALE_FACTOR);
    S_SIZE_T cbNewBuckets = S_SIZE_T(cNewBuckets) * S_SIZE_T(sizeof(PTR_VolatileEntry));
    // Note: the old bucket array is deliberately never freed -- loader heap memory lives for the lifetime of
    // the heap and lock-free readers may still be walking the old array.
    PTR_VolatileEntry *pNewBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem_NoThrow(cbNewBuckets);
    if (!pNewBuckets)
        return;

    // All buckets are initially empty.
    // Note: Memory allocated on loader heap is zero filled
    // memset(pNewBuckets, 0, cNewBuckets * sizeof(PTR_VolatileEntry));

    // Run through the old table and transfer all the entries. Be sure not to mess with the integrity of the
    // old table while we are doing this, as there can be concurrent readers! Note that it is OK if the
    // concurrent reader misses out on a match, though - they will have to acquire the lock on a miss & try
    // again.
    for (DWORD i = 0; i < m_cWarmBuckets; i++)
    {
        PTR_VolatileEntry pEntry = (GetWarmBuckets())[i];

        // Try to lock out readers from scanning this bucket. This is obviously a race which may fail.
        // However, note that it's OK if somebody is already in the list - it's OK if we mess with the bucket
        // groups, as long as we don't destroy anything. The lookup function will still do appropriate
        // comparison even if it wanders aimlessly amongst entries while we are rearranging things. If a
        // lookup finds a match under those circumstances, great. If not, they will have to acquire the lock &
        // try again anyway.
        (GetWarmBuckets())[i] = NULL;

        // Re-link every entry of the old chain into its (re-hashed) bucket in the new array. This reverses
        // each chain's order, which is fine since chain order carries no meaning here.
        while (pEntry != NULL)
        {
            DWORD dwNewBucket = pEntry->m_iHashValue % cNewBuckets;
            PTR_VolatileEntry pNextEntry = pEntry->m_pNextEntry;

            pEntry->m_pNextEntry = pNewBuckets[dwNewBucket];
            pNewBuckets[dwNewBucket] = pEntry;

            pEntry = pNextEntry;
        }
    }

    // Make sure that all writes are visible before publishing the new array.
    MemoryBarrier();
    m_pWarmBuckets.SetValue(pNewBuckets);

    // The new number of buckets has to be published last (prior to this readers may miscalculate a bucket
    // index, but the result will always be in range and they'll simply walk the wrong chain and get a miss,
    // prompting a retry under the lock). If we let the count become visible unordered wrt to the bucket array
    // itself a reader could potentially read buckets from beyond the end of the old bucket list).
    MemoryBarrier();
    m_cWarmBuckets = cNewBuckets;
}
| 241 | |
| 242 | // Returns the next prime larger (or equal to) than the number given. |
| 243 | template <NGEN_HASH_PARAMS> |
| 244 | DWORD NgenHashTable<NGEN_HASH_ARGS>::NextLargestPrime(DWORD dwNumber) |
| 245 | { |
| 246 | for (DWORD i = 0; i < COUNTOF(g_rgPrimes); i++) |
| 247 | if (g_rgPrimes[i] >= dwNumber) |
| 248 | { |
| 249 | dwNumber = g_rgPrimes[i]; |
| 250 | break; |
| 251 | } |
| 252 | |
| 253 | return dwNumber; |
| 254 | } |
| 255 | #endif // !DACCESS_COMPILE |
| 256 | |
| 257 | // Return the number of entries held in the table (does not include entries allocated but not inserted yet). |
template <NGEN_HASH_PARAMS>
DWORD NgenHashTable<NGEN_HASH_ARGS>::BaseGetElementCount()
{
    LIMITED_METHOD_DAC_CONTRACT;

    // Sum the warm (runtime-allocated) entries with the hot and cold persisted entry counts when an ngen
    // image may have supplied the latter.
    return m_cWarmEntries
#ifdef FEATURE_PREJIT
        + m_sHotEntries.m_cEntries + m_sColdEntries.m_cEntries
#endif
        ;
}
| 269 | |
| 270 | // Find first entry matching a given hash value (returns NULL on no match). Call BaseFindNextEntryByHash to |
| 271 | // iterate the remaining matches (until it returns NULL). The LookupContext supplied by the caller is |
| 272 | // initialized by BaseFindFirstEntryByHash and read/updated by BaseFindNextEntryByHash to keep track of where |
| 273 | // we are. |
template <NGEN_HASH_PARAMS>
DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseFindFirstEntryByHash(NgenHashValue iHash, LookupContext *pContext)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
        PRECONDITION(CheckPointer(pContext));
    }
    CONTRACTL_END;

    // Search order is hot -> warm -> cold; BaseFindNextEntryByHash resumes from wherever this scan stops
    // (the section reached is recorded in pContext by the Find*EntryByHash helpers).
    DPTR(VALUE) pEntry;

#ifdef FEATURE_PREJIT
    // Look in the hot entries first.
    pEntry = FindPersistedEntryByHash(&m_sHotEntries, iHash, pContext);
    if (pEntry)
        return pEntry;
#endif // FEATURE_PREJIT

    // Then the warm entries.
    pEntry = FindVolatileEntryByHash(iHash, pContext);
    if (pEntry)
        return pEntry;

#ifdef FEATURE_PREJIT
    // And finally the cold entries.
    return FindPersistedEntryByHash(&m_sColdEntries, iHash, pContext);
#else // FEATURE_PREJIT
    // Without prejit support there are only warm entries, so a warm miss is a table miss.
    return NULL;
#endif // FEATURE_PREJIT
}
| 308 | |
// Find the next entry matching the hash value passed to a previous BaseFindFirstEntryByHash call (returns
// NULL once all matches are exhausted). The LookupContext initialized by BaseFindFirstEntryByHash records
// which section (hot/warm/cold) we are scanning and where we got to; it is read and updated here on each
// call.
template <NGEN_HASH_PARAMS>
DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseFindNextEntryByHash(LookupContext *pContext)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
        PRECONDITION(CheckPointer(pContext));
    }
    CONTRACTL_END;

    NgenHashValue iHash;

    // Dispatch on which section of the table (hot/warm/cold) the previous call left us scanning.
    switch (pContext->m_eType)
    {
#ifdef FEATURE_PREJIT
    case Hot:
    case Cold:
    {
        // Fetch the entry we were looking at last from the context and remember the corresponding hash code.
        PTR_PersistedEntry pPersistedEntry = dac_cast<PTR_PersistedEntry>(pContext->m_pEntry);
        iHash = pPersistedEntry->m_iHashValue;

        // Iterate while there are still entries left in the bucket chain. Persisted chains are laid out as
        // contiguous runs of entries (hence the pointer increment rather than following a link field).
        while (pContext->m_cRemainingEntries)
        {
            // Advance to next entry, reducing the number of entries left to scan.
            pPersistedEntry++;
            pContext->m_cRemainingEntries--;

            if (pPersistedEntry->m_iHashValue == iHash)
            {
                // Found a match on hash code. Update our find context to indicate where we got to and return
                // a pointer to the sub-class portion of the entry.
                pContext->m_pEntry = dac_cast<TADDR>(pPersistedEntry);
                return VALUE_FROM_PERSISTED_ENTRY(pPersistedEntry);
            }
        }

        // We didn't find a match.
        if (pContext->m_eType == Hot)
        {
            // If we were searching the hot entries then we should try the warm entries next.
            DPTR(VALUE) pNext = FindVolatileEntryByHash(iHash, pContext);
            if (pNext)
                return pNext;

            // If that didn't work try the cold entries.
            return FindPersistedEntryByHash(&m_sColdEntries, iHash, pContext);
        }

        // We were already searching cold entries, a failure here means the entry is not in the table.
        return NULL;
    }
#endif // FEATURE_PREJIT

    case Warm:
    {
        // Fetch the entry we were looking at last from the context and remember the corresponding hash code.
        PTR_VolatileEntry pVolatileEntry = dac_cast<PTR_VolatileEntry>(pContext->m_pEntry);
        iHash = pVolatileEntry->m_iHashValue;

        // Iterate over the bucket chain (warm entries are singly linked via m_pNextEntry).
        while (pVolatileEntry->m_pNextEntry)
        {
            // Advance to the next entry.
            pVolatileEntry = pVolatileEntry->m_pNextEntry;
            if (pVolatileEntry->m_iHashValue == iHash)
            {
                // Found a match on hash code. Update our find context to indicate where we got to and return
                // a pointer to the sub-class portion of the entry.
                pContext->m_pEntry = dac_cast<TADDR>(pVolatileEntry);
                return VALUE_FROM_VOLATILE_ENTRY(pVolatileEntry);
            }
        }

        // We didn't find a match, fall through to the cold entries.
#ifdef FEATURE_PREJIT
        return FindPersistedEntryByHash(&m_sColdEntries, iHash, pContext);
#else
        return NULL;
#endif
    }

    default:
        _ASSERTE(!"Unknown NgenHashTable entry type" );
        return NULL;
    }
}
| 404 | |
| 405 | #ifdef FEATURE_PREJIT |
| 406 | |
| 407 | // Allocate and initialize a new list with the given count of buckets and configured to hold no more than the |
| 408 | // given number of entries or have a bucket chain longer than the specified maximum. These two maximums allow |
| 409 | // the implementation to choose an optimal data format for the bucket list at runtime and are enforced by |
| 410 | // asserts in the debug build. |
| 411 | // static |
| 412 | template <NGEN_HASH_PARAMS> |
| 413 | typename NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList *NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::CreateList(DWORD cBuckets, DWORD cEntries, DWORD cMaxEntriesInBucket) |
| 414 | { |
| 415 | CONTRACTL |
| 416 | { |
| 417 | THROWS; |
| 418 | GC_NOTRIGGER; |
| 419 | MODE_ANY; |
| 420 | } |
| 421 | CONTRACTL_END; |
| 422 | |
| 423 | // The size of each bucket depends on the number of entries we need to store and how big a bucket chain |
| 424 | // ever gets. |
| 425 | DWORD cbBucket = GetBucketSize(cEntries, cMaxEntriesInBucket); |
| 426 | |
| 427 | // Allocate enough memory to store the bucket list header and bucket array. |
| 428 | S_SIZE_T cbBuckets = S_SIZE_T(sizeof(PersistedBucketList)) + (S_SIZE_T(cbBucket) * S_SIZE_T(cBuckets)); |
| 429 | if (cbBuckets.IsOverflow()) |
| 430 | COMPlusThrowOM(); |
| 431 | PersistedBucketList *pBucketList = (PersistedBucketList*)(new BYTE[cbBuckets.Value()]); |
| 432 | |
| 433 | #ifdef _DEBUG |
| 434 | // In debug builds we store all the input parameters to validate subsequent requests. In retail none of |
| 435 | // this data is needed. |
| 436 | pBucketList->m_cBuckets = cBuckets; |
| 437 | pBucketList->m_cEntries = cEntries; |
| 438 | pBucketList->m_cMaxEntriesInBucket = cMaxEntriesInBucket; |
| 439 | #endif // _DEBUG |
| 440 | |
| 441 | pBucketList->m_cbBucket = cbBucket; |
| 442 | pBucketList->m_dwEntryCountShift = BitsRequired(cEntries); |
| 443 | pBucketList->m_dwInitialEntryMask = (1 << pBucketList->m_dwEntryCountShift) - 1; |
| 444 | |
| 445 | // Zero all the buckets (empty all the bucket chains). |
| 446 | memset(pBucketList + 1, 0, cBuckets * cbBucket); |
| 447 | |
| 448 | return pBucketList; |
| 449 | } |
| 450 | |
| 451 | // Get the size in bytes of this entire bucket list (need to pass in the bucket count since we save space by |
| 452 | // not storing it here, but we do validate this in debug mode). |
| 453 | template <NGEN_HASH_PARAMS> |
| 454 | size_t NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetSize(DWORD cBuckets) |
| 455 | { |
| 456 | LIMITED_METHOD_DAC_CONTRACT; |
| 457 | |
| 458 | _ASSERTE(cBuckets == m_cBuckets); |
| 459 | return sizeof(PersistedBucketList) + (cBuckets * m_cbBucket); |
| 460 | } |
| 461 | |
| 462 | // Get the initial entry index and entry count for the given bucket. Initial entry index value is undefined |
| 463 | // when count comes back as zero. |
template <NGEN_HASH_PARAMS>
void NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetBucket(DWORD dwIndex, DWORD *pdwFirstEntry, DWORD *pdwCount)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    _ASSERTE(dwIndex < m_cBuckets);

    // Find the start of the bucket we're interested in based on the index and size chosen for buckets in this
    // list instance (the bucket array immediately follows the header).
    TADDR pBucket = dac_cast<TADDR>(this) + sizeof(PersistedBucketList) + (dwIndex * m_cbBucket);

    // Handle each format of bucket separately. In all cases read the correct number of bytes to form one
    // bitfield, extract the low order bits to retrieve the initial entry index and shift down the remaining
    // bits to obtain the entry count.
    switch (m_cbBucket)
    {
    case 2:
    {
        _ASSERTE(m_dwEntryCountShift < 16 && m_dwInitialEntryMask < 0xffff);

        WORD wBucketContents = *dac_cast<PTR_WORD>(pBucket);

        *pdwFirstEntry = wBucketContents & m_dwInitialEntryMask;
        *pdwCount = wBucketContents >> m_dwEntryCountShift;

        break;
    }

    case 4:
    {
        _ASSERTE(m_dwEntryCountShift < 32 && m_dwInitialEntryMask < 0xffffffff);

        DWORD dwBucketContents = *dac_cast<PTR_DWORD>(pBucket);

        *pdwFirstEntry = dwBucketContents & m_dwInitialEntryMask;
        *pdwCount = dwBucketContents >> m_dwEntryCountShift;

        break;
    }

    case 8:
    {
        _ASSERTE(m_dwEntryCountShift < 64);

        ULONG64 qwBucketContents = *dac_cast<PTR_ULONG64>(pBucket);

        *pdwFirstEntry = (DWORD)(qwBucketContents & m_dwInitialEntryMask);
        *pdwCount = (DWORD)(qwBucketContents >> m_dwEntryCountShift);

        break;
    }

    default:
#ifdef DACCESS_COMPILE
        // Minidumps don't guarantee this will work - memory may not have been dumped, target corrupted, etc.
        // Report an empty bucket rather than crashing the debugger.
        *pdwFirstEntry = 0;
        *pdwCount = 0;
#else
        _ASSERTE(!"Invalid bucket list bucket size" );
#endif
    }

    // Sanity check the decoded values against the debug-only copies of the construction parameters.
    _ASSERTE((*pdwFirstEntry < m_cEntries) || (*pdwCount == 0));
    _ASSERTE(*pdwCount <= m_cMaxEntriesInBucket);
}
| 536 | |
| 537 | // Simplified initial entry index when you don't need the count (don't call this for buckets with zero |
| 538 | // entries). |
| 539 | template <NGEN_HASH_PARAMS> |
| 540 | DWORD NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetInitialEntry(DWORD dwIndex) |
| 541 | { |
| 542 | CONTRACTL |
| 543 | { |
| 544 | NOTHROW; |
| 545 | GC_NOTRIGGER; |
| 546 | MODE_ANY; |
| 547 | SUPPORTS_DAC; |
| 548 | } |
| 549 | CONTRACTL_END; |
| 550 | |
| 551 | DWORD dwInitialEntry, dwEntryCount; |
| 552 | GetBucket(dwIndex, &dwInitialEntry, &dwEntryCount); |
| 553 | |
| 554 | _ASSERTE(dwEntryCount > 0); |
| 555 | |
| 556 | return dwInitialEntry; |
| 557 | } |
| 558 | |
| 559 | // For the given bucket set the index of the initial entry and the count of entries in the chain. If the count |
| 560 | // is zero the initial entry index is meaningless and ignored. |
| 561 | template <NGEN_HASH_PARAMS> |
| 562 | void NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::SetBucket(DWORD dwIndex, DWORD dwFirstEntry, DWORD cEntries) |
| 563 | { |
| 564 | CONTRACTL |
| 565 | { |
| 566 | NOTHROW; |
| 567 | GC_NOTRIGGER; |
| 568 | MODE_ANY; |
| 569 | } |
| 570 | CONTRACTL_END; |
| 571 | |
| 572 | _ASSERTE(dwIndex < m_cBuckets); |
| 573 | _ASSERTE(cEntries <= m_cMaxEntriesInBucket); |
| 574 | if (cEntries > 0) |
| 575 | { |
| 576 | _ASSERTE(dwFirstEntry < m_cEntries); |
| 577 | _ASSERTE(dwFirstEntry <= m_dwInitialEntryMask); |
| 578 | } |
| 579 | |
| 580 | // Find the start of the bucket we're interested in based on the index and size chosen for buckets in this |
| 581 | // list instance. |
| 582 | BYTE *pbBucket = (BYTE*)this + sizeof(PersistedBucketList) + (dwIndex * m_cbBucket); |
| 583 | |
| 584 | // Handle each format of bucket separately. In all cases form a single bitfield with low-order bits |
| 585 | // specifying the initial entry index and higher bits containing the entry count. Write this into the |
| 586 | // bucket entry using the correct number of bytes. |
| 587 | ULONG64 qwBucketBits = dwFirstEntry | (cEntries << m_dwEntryCountShift); |
| 588 | switch (m_cbBucket) |
| 589 | { |
| 590 | case 2: |
| 591 | { |
| 592 | _ASSERTE(m_dwEntryCountShift < 16 && m_dwInitialEntryMask < 0xffff); |
| 593 | *(WORD*)pbBucket = (WORD)qwBucketBits; |
| 594 | break; |
| 595 | } |
| 596 | |
| 597 | case 4: |
| 598 | { |
| 599 | _ASSERTE(m_dwEntryCountShift < 32 && m_dwInitialEntryMask < 0xffffffff); |
| 600 | *(DWORD*)pbBucket = (DWORD)qwBucketBits; |
| 601 | break; |
| 602 | } |
| 603 | |
| 604 | case 8: |
| 605 | { |
| 606 | _ASSERTE(m_dwEntryCountShift < 64); |
| 607 | *(ULONG64*)pbBucket = qwBucketBits; |
| 608 | break; |
| 609 | } |
| 610 | |
| 611 | default: |
| 612 | _ASSERTE(!"Invalid bucket list bucket size" ); |
| 613 | } |
| 614 | } |
| 615 | |
| 616 | // Return the number of bits required to express a unique ID for the number of entities given. |
| 617 | //static |
| 618 | template <NGEN_HASH_PARAMS> |
| 619 | DWORD NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::BitsRequired(DWORD cEntities) |
| 620 | { |
| 621 | LIMITED_METHOD_CONTRACT; |
| 622 | |
| 623 | // Starting with a bit-mask of the most significant bit and iterating over masks for successively less |
| 624 | // significant bits, stop as soon as the mask co-incides with a set bit in the value. Simultaneously we're |
| 625 | // counting down the bits required to express the range of values implied by seeing the corresponding bit |
| 626 | // set in the value (e.g. when we're testing the high bit we know we'd need 32-bits to encode the range of |
| 627 | // values that have this bit set). Stop when we get to one bit (we never return 0 bits required, even for |
| 628 | // an input value of 0). |
| 629 | DWORD dwMask = 0x80000000; |
| 630 | DWORD cBits = 32; |
| 631 | while (cBits > 1) |
| 632 | { |
| 633 | if (cEntities & dwMask) |
| 634 | return cBits; |
| 635 | |
| 636 | dwMask >>= 1; |
| 637 | cBits--; |
| 638 | } |
| 639 | |
| 640 | return 1; |
| 641 | } |
| 642 | |
| 643 | // Return the minimum size (in bytes) of each bucket list entry that can express all buckets given the max |
| 644 | // count of entries and entries in a single bucket chain. |
| 645 | // static |
| 646 | template <NGEN_HASH_PARAMS> |
| 647 | DWORD NgenHashTable<NGEN_HASH_ARGS>::PersistedBucketList::GetBucketSize(DWORD cEntries, DWORD cMaxEntriesInBucket) |
| 648 | { |
| 649 | CONTRACTL |
| 650 | { |
| 651 | NOTHROW; |
| 652 | GC_NOTRIGGER; |
| 653 | MODE_ANY; |
| 654 | } |
| 655 | CONTRACTL_END; |
| 656 | |
| 657 | // We need enough bits to express a start entry index (related to the total number of entries in the |
| 658 | // table) and a chain count (so take the maximum chain length into consideration). |
| 659 | DWORD cTotalBits = BitsRequired(cEntries) + BitsRequired(cMaxEntriesInBucket); |
| 660 | |
| 661 | // Rather than support complete flexibility (an arbitrary number of bytes to express the combination of |
| 662 | // the two bitfields above) we'll just pull out the most useful selection (which simplifies the access |
| 663 | // code and potentially might give us a perf edge over the more generalized algorithm). |
| 664 | |
| 665 | // We want naturally aligned bucket entries for access perf, 1 byte entries aren't all that interesting |
| 666 | // (most tables won't be small enough to be expressed this way and those that are won't get much benefit |
| 667 | // from the extra compression of the bucket list). We also don't believe we'll ever need more than 64 |
| 668 | // bits. This leaves us with 2, 4 and 8 byte entries. The tables in the current desktop CLR for mscorlib |
| 669 | // will fit in the 2-byte category and will give us substantial space saving over the naive implementation |
| 670 | // of a bucket with two DWORDs. |
| 671 | |
| 672 | if (cTotalBits <= 16) |
| 673 | return 2; |
| 674 | |
| 675 | if (cTotalBits <= 32) |
| 676 | return 4; |
| 677 | |
| 678 | // Invariant guaranteed by BitsRequired above. |
| 679 | _ASSERTE(cTotalBits <= 64); |
| 680 | return 8; |
| 681 | } |
| 682 | |
| 683 | #ifndef DACCESS_COMPILE |
| 684 | |
| 685 | #ifdef _MSC_VER |
| 686 | #pragma warning(push) |
| 687 | #pragma warning(disable:4723) // Prevent "warning C4723: potential divide by 0" |
| 688 | #endif // _MSC_VER |
| 689 | |
| 690 | // Call during ngen to save hash table data structures into the ngen image. Calls derived-class |
| 691 | // implementations of ShouldSave to determine which entries should be serialized, IsHotEntry to hot/cold split |
| 692 | // the entries and SaveEntry to allow per-entry extension of the saving process. |
| 693 | template <NGEN_HASH_PARAMS> |
| 694 | void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *pProfileData) |
| 695 | { |
| 696 | STANDARD_VM_CONTRACT; |
| 697 | |
| 698 | // This is a fairly long and complex process but at its heart it's fairly linear. We perform multiple |
| 699 | // passes over the data in sequence which might seem slow but everything is arranged to avoid any O(N^2) |
| 700 | // algorithms. |
| 701 | |
| 702 | // Persisted hashes had better have supplied an owning module at creation time (otherwise we won't know |
| 703 | // how to find a loader heap for further allocations at runtime: we don't know how to serialize a loader |
| 704 | // heap pointer). |
| 705 | _ASSERTE(!m_pModule.IsNull()); |
| 706 | |
| 707 | // We can only save once during ngen so the hot and cold sections of the hash cannot have been populated |
| 708 | // yet. |
| 709 | _ASSERTE(m_sHotEntries.m_cEntries == 0 && m_sColdEntries.m_cEntries == 0); |
| 710 | |
| 711 | DWORD i; |
| 712 | |
| 713 | // As we re-arrange volatile warm entries into hot and cold sets of persisted entries we need to keep lots |
| 714 | // of intermediate tracking information. We also need to provide a subset of this mapping information to |
| 715 | // the sub-class (so it can fix up cross entry references for example). The temporary structure allocated |
| 716 | // below performs that function (it will be destructed automatically at the end of this method). |
| 717 | EntryMappingTable sEntryMap; |
| 718 | sEntryMap.m_cEntries = m_cWarmEntries; |
| 719 | #ifdef _PREFAST_ |
| 720 | #pragma warning(suppress:6211) // Suppress bogus prefast warning about memory leak (EntryMappingTable acts as a holder) |
| 721 | #endif |
| 722 | |
| 723 | // The 'typename' keyword shouldn't be necessary, but g++ gets confused without it. |
| 724 | sEntryMap.m_pEntries = new typename EntryMappingTable::Entry[m_cWarmEntries]; |
| 725 | |
| 726 | // |
| 727 | // PHASE 1 |
| 728 | // |
| 729 | // Iterate all the current warm entries, ask the sub-class which of them should be saved into the image |
| 730 | // and of those which are hot and which are cold. |
| 731 | // |
| 732 | |
| 733 | DWORD cHotEntries = 0; |
| 734 | DWORD cColdEntries = 0; |
| 735 | |
| 736 | // Visit each warm bucket. |
| 737 | for (i = 0; i < m_cWarmBuckets; i++) |
| 738 | { |
| 739 | // Iterate through the chain of warm entries for this bucket. |
| 740 | VolatileEntry *pOldEntry = (GetWarmBuckets())[i]; |
| 741 | while (pOldEntry) |
| 742 | { |
| 743 | // Is the current entry being saved into the image? |
| 744 | if (DOWNCALL(ShouldSave)(pImage, &pOldEntry->m_sValue)) |
| 745 | { |
| 746 | // Yes, so save the details into the next available slot in the entry map. At this stage we |
| 747 | // know the original entry address, the hash value and whether the entry is hot or cold. |
| 748 | DWORD dwCurrentEntry = cHotEntries + cColdEntries; |
| 749 | sEntryMap.m_pEntries[dwCurrentEntry].m_pOldEntry = &pOldEntry->m_sValue; |
| 750 | sEntryMap.m_pEntries[dwCurrentEntry].m_iHashValue = pOldEntry->m_iHashValue; |
| 751 | |
| 752 | // Is the entry hot? When given no profile data we assume cold. |
| 753 | if (pProfileData != NULL && DOWNCALL(IsHotEntry)(&pOldEntry->m_sValue, pProfileData)) |
| 754 | { |
| 755 | cHotEntries++; |
| 756 | sEntryMap.m_pEntries[dwCurrentEntry].m_fHot = true; |
| 757 | } |
| 758 | else |
| 759 | { |
| 760 | cColdEntries++; |
| 761 | sEntryMap.m_pEntries[dwCurrentEntry].m_fHot = false; |
| 762 | } |
| 763 | } |
| 764 | |
| 765 | pOldEntry = pOldEntry->m_pNextEntry; |
| 766 | } |
| 767 | } |
| 768 | |
| 769 | // Set size of the entry map based on the real number of entries we're going to save. |
| 770 | _ASSERTE((cHotEntries + cColdEntries) <= m_cWarmEntries); |
| 771 | sEntryMap.m_cEntries = cHotEntries + cColdEntries; |
| 772 | |
| 773 | // |
| 774 | // PHASE 2 |
| 775 | // |
| 776 | // Determine the layout of the new hot and cold tables (if applicable). We pick new bucket list sizes |
| 777 | // based on the number of entries to go in each table and from that we can calculate the length of each |
| 778 | // entry chain off each bucket (which is important both to derive a maximum chain length used when |
| 779 | // picking an optimized encoding for the bucket list and allows us to layout the new entries in linear |
| 780 | // time). |
| 781 | // |
| 782 | // We need a couple of extra arrays to track bucket chain sizes until we have enough info to allocate the |
| 783 | // new bucket lists themselves. |
| 784 | // |
| 785 | |
| 786 | // We'll allocate half as many buckets as entries (with at least 1 bucket, or zero if there are no entries |
| 787 | // in this section of the hash). |
| 788 | DWORD cHotBuckets = cHotEntries ? NextLargestPrime(cHotEntries / 2) : 0; |
| 789 | DWORD cColdBuckets = cColdEntries ? NextLargestPrime(cColdEntries / 2) : 0; |
| 790 | |
| 791 | // Allocate arrays to track bucket chain lengths for each hot or cold bucket list (as needed). |
| 792 | DWORD *pHotBucketSizes = cHotBuckets ? new DWORD[cHotBuckets] : NULL; |
| 793 | memset(pHotBucketSizes, 0, cHotBuckets * sizeof(DWORD)); |
| 794 | |
| 795 | DWORD *pColdBucketSizes = cColdBuckets ? new DWORD[cColdBuckets] : NULL; |
| 796 | memset(pColdBucketSizes, 0, cColdBuckets * sizeof(DWORD)); |
| 797 | |
| 798 | // We'll calculate the maximum bucket chain length separately for hot and cold sections (each has its own |
| 799 | // bucket list that might be optimized differently). |
| 800 | DWORD cMaxHotChain = 0; |
| 801 | DWORD cMaxColdChain = 0; |
| 802 | |
| 803 | // Iterate through all the entries to be saved (linear scan through the entry map we built in phase 1). |
| 804 | for (i = 0; i < sEntryMap.m_cEntries; i++) |
| 805 | { |
| 806 | // The 'typename' keyword shouldn't be necessary, but g++ gets confused without it. |
| 807 | typename EntryMappingTable::Entry *pMapEntry = &sEntryMap.m_pEntries[i]; |
| 808 | |
| 809 | // For each entry calculate which bucket it will end up in under the revised bucket list. Also record |
| 810 | // its order in the bucket chain (first come, first served). Recording this ordinal now is what allows |
| 811 | // us to lay out entries into their final order using a linear algorithm in a later phase. |
| 812 | if (pMapEntry->m_fHot) |
| 813 | { |
| 814 | _ASSERTE(cHotBuckets != 0); |
| 815 | pMapEntry->m_dwNewBucket = pMapEntry->m_iHashValue % cHotBuckets; |
| 816 | pMapEntry->m_dwChainOrdinal = pHotBucketSizes[pMapEntry->m_dwNewBucket]++; |
| 817 | if (pHotBucketSizes[pMapEntry->m_dwNewBucket] > cMaxHotChain) |
| 818 | cMaxHotChain = pHotBucketSizes[pMapEntry->m_dwNewBucket]; |
| 819 | } |
| 820 | else |
| 821 | { |
| 822 | // The C++ compiler is currently complaining that cColdBuckets could be zero in the modulo |
| 823 | // operation below. It cannot due to the logic in this method (if we have a cold entry we'll have |
| 824 | // at least one cold bucket, see the assignments above) but the flow is far too complex for the |
| 825 | // C++ compiler to follow. Unfortunately it won't be told (the warning can't be disabled and even |
| 826 | // an __assume won't work) so we take the hit of generating the useless extra if below. |
| 827 | if (cColdBuckets > 0) |
| 828 | { |
| 829 | pMapEntry->m_dwNewBucket = pMapEntry->m_iHashValue % cColdBuckets; |
| 830 | pMapEntry->m_dwChainOrdinal = pColdBucketSizes[pMapEntry->m_dwNewBucket]++; |
| 831 | if (pColdBucketSizes[pMapEntry->m_dwNewBucket] > cMaxColdChain) |
| 832 | cMaxColdChain = pColdBucketSizes[pMapEntry->m_dwNewBucket]; |
| 833 | } |
| 834 | else |
| 835 | _ASSERTE(!"Should be unreachable, see comment above" ); |
| 836 | } |
| 837 | } |
| 838 | |
| 839 | // |
| 840 | // PHASE 3 |
| 841 | // |
| 842 | // Allocate the new hot and cold bucket lists and entry arrays (as needed). The bucket lists have |
| 843 | // optimized layout based on knowledge of the entries they will map (total number of entries and the size |
| 844 | // of the largest single bucket chain). |
| 845 | // |
| 846 | |
| 847 | if (cHotEntries) |
| 848 | { |
| 849 | m_sHotEntries.m_cEntries = cHotEntries; |
| 850 | m_sHotEntries.m_cBuckets = cHotBuckets; |
| 851 | m_sHotEntries.m_pEntries.SetValue(new PersistedEntry[cHotEntries]); |
| 852 | m_sHotEntries.m_pBuckets.SetValue(PersistedBucketList::CreateList(cHotBuckets, cHotEntries, cMaxHotChain)); |
| 853 | memset(GetPersistedHotEntries(), 0, cHotEntries * sizeof(PersistedEntry)); // NGen determinism |
| 854 | } |
| 855 | |
| 856 | if (cColdEntries) |
| 857 | { |
| 858 | m_sColdEntries.m_cEntries = cColdEntries; |
| 859 | m_sColdEntries.m_cBuckets = cColdBuckets; |
| 860 | m_sColdEntries.m_pEntries.SetValue(new PersistedEntry[cColdEntries]); |
| 861 | m_sColdEntries.m_pBuckets.SetValue(PersistedBucketList::CreateList(cColdBuckets, cColdEntries, cMaxColdChain)); |
| 862 | memset(GetPersistedColdEntries(), 0, cColdEntries * sizeof(PersistedEntry)); // NGen determinism |
| 863 | } |
| 864 | |
| 865 | // |
| 866 | // PHASE 4 |
| 867 | // |
| 868 | // Initialize the bucket lists. We need to set an initial entry index (index into the entry array) and |
| 869 | // entry count for each bucket. The counts we already computed in phase 2 and since we're free to order |
| 870 | // the entry array however we like, we can compute the initial entry index for each bucket in turn |
| 871 | // trivially by laying out all entries for bucket 0 first followed by all entries for bucket 1 etc. |
| 872 | // |
| 873 | // This also has the nice effect of placing entries in the same bucket chain contiguously (and in the |
| 874 | // order that a full hash traversal will take). |
| 875 | // |
| 876 | |
| 877 | DWORD dwNextId = 0; // This represents the index of the next entry to start a bucket chain |
| 878 | for (i = 0; i < cHotBuckets; i++) |
| 879 | { |
| 880 | m_sHotEntries.m_pBuckets.GetValue()->SetBucket(i, dwNextId, pHotBucketSizes[i]); |
| 881 | dwNextId += pHotBucketSizes[i]; |
| 882 | } |
| 883 | _ASSERTE(dwNextId == m_sHotEntries.m_cEntries); |
| 884 | |
| 885 | dwNextId = 0; // Reset index for the cold entries (remember they have their own table of entries) |
| 886 | for (i = 0; i < cColdBuckets; i++) |
| 887 | { |
| 888 | m_sColdEntries.m_pBuckets.GetValue()->SetBucket(i, dwNextId, pColdBucketSizes[i]); |
| 889 | dwNextId += pColdBucketSizes[i]; |
| 890 | } |
| 891 | _ASSERTE(dwNextId == m_sColdEntries.m_cEntries); |
| 892 | |
| 893 | // |
| 894 | // PHASE 5 |
| 895 | // |
| 896 | // Determine new addresses for each entry. This is relatively simple since we know the bucket index, the |
| 897 | // index of the first entry for that bucket and how far into that chain each entry is located. |
| 898 | // |
| 899 | |
| 900 | for (i = 0; i < sEntryMap.m_cEntries; i++) |
| 901 | { |
| 902 | // The 'typename' keyword shouldn't be necessary, but g++ gets confused without it. |
| 903 | typename EntryMappingTable::Entry *pMapEntry = &sEntryMap.m_pEntries[i]; |
| 904 | |
| 905 | // Entry block depends on whether this entry is hot or cold. |
| 906 | APTR_PersistedEntry pPersistedEntries = pMapEntry->m_fHot ? GetPersistedHotEntries() : GetPersistedColdEntries(); |
| 907 | PTR_PersistedBucketList pPersistedBucketsList = pMapEntry->m_fHot ? GetPersistedHotBuckets() : GetPersistedColdBuckets(); |
| 908 | |
| 909 | // We already know the new bucket this entry will go into. Retrieve the index of the first entry in |
| 910 | // that bucket chain. |
| 911 | DWORD dwBaseChainIndex = pPersistedBucketsList->GetInitialEntry(pMapEntry->m_dwNewBucket); |
| 912 | |
| 913 | // This entry will be located at some offset from the index above (we calculated this ordinal in phase |
| 914 | // 2). |
| 915 | PersistedEntry *pNewEntry = &pPersistedEntries[dwBaseChainIndex + pMapEntry->m_dwChainOrdinal]; |
| 916 | |
| 917 | // Record the address of the embedded sub-class hash entry in the map entry (sub-classes will use this |
| 918 | // info to map old entry addresses to their new locations). |
| 919 | sEntryMap.m_pEntries[i].m_pNewEntry = &pNewEntry->m_sValue; |
| 920 | |
| 921 | // Initialize the new entry. Note that a simple bit-copy is performed on the sub-classes embedded |
| 922 | // entry. If fixups are needed they can be performed in the call to SaveEntry in the next phase. |
| 923 | pNewEntry->m_sValue = *pMapEntry->m_pOldEntry; |
| 924 | pNewEntry->m_iHashValue = pMapEntry->m_iHashValue; |
| 925 | } |
| 926 | |
| 927 | // |
| 928 | // PHASE 6 |
| 929 | // |
| 930 | // For each entry give the hash sub-class a chance to perform any additional saving or fixups. We pass |
| 931 | // both the old and new address of each entry, plus the mapping table so they can map other entry |
| 932 | // addresses (if, for example, they have cross-entry pointer fields in their data). |
| 933 | // |
| 934 | // We ask for each entry whether the saved data will be immutable. This is an optimization: if all |
| 935 | // entries turn out to be immutable we will save the entire entry array in a read-only (shareable) |
| 936 | // section. |
| 937 | // |
| 938 | |
| 939 | bool fAllEntriesImmutable = true; |
| 940 | for (i = 0; i < sEntryMap.m_cEntries; i++) |
| 941 | if (!DOWNCALL(SaveEntry)(pImage, |
| 942 | pProfileData, |
| 943 | sEntryMap.m_pEntries[i].m_pOldEntry, |
| 944 | sEntryMap.m_pEntries[i].m_pNewEntry, |
| 945 | &sEntryMap)) |
| 946 | fAllEntriesImmutable = false; |
| 947 | |
| 948 | // We're mostly done. Now just some cleanup and the actual DataImage storage operations. |
| 949 | |
| 950 | // We don't need the bucket size tracking arrays any more. |
| 951 | delete [] pHotBucketSizes; |
| 952 | delete [] pColdBucketSizes; |
| 953 | |
| 954 | // If there are any hot entries store the entry array and bucket list. |
| 955 | if (cHotEntries) |
| 956 | { |
| 957 | pImage->StoreStructure(GetPersistedHotEntries(), |
| 958 | static_cast<ULONG>(sizeof(PersistedEntry) * cHotEntries), |
| 959 | fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT : DataImage::ITEM_NGEN_HASH_ENTRIES_HOT); |
| 960 | |
| 961 | pImage->StoreStructure(GetPersistedHotBuckets(), |
| 962 | static_cast<ULONG>(m_sHotEntries.m_pBuckets.GetValue()->GetSize(m_sHotEntries.m_cBuckets)), |
| 963 | DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT); |
| 964 | } |
| 965 | |
| 966 | // If there are any cold entries store the entry array and bucket list. |
| 967 | if (cColdEntries) |
| 968 | { |
| 969 | pImage->StoreStructure(GetPersistedColdEntries(), |
| 970 | static_cast<ULONG>(sizeof(PersistedEntry) * cColdEntries), |
| 971 | fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD : DataImage::ITEM_NGEN_HASH_ENTRIES_COLD); |
| 972 | |
| 973 | pImage->StoreStructure(GetPersistedColdBuckets(), |
| 974 | static_cast<ULONG>(GetPersistedColdBuckets()->GetSize(m_sColdEntries.m_cBuckets)), |
| 975 | DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD); |
| 976 | } |
| 977 | |
| 978 | // Store the root data structure itself. |
| 979 | pImage->StoreStructure(this, sizeof(FINAL_CLASS), cHotEntries ? |
| 980 | DataImage::ITEM_NGEN_HASH_HOT : DataImage::ITEM_NGEN_HASH_COLD); |
| 981 | |
| 982 | // We've moved the warm entries to hot and cold sections, so reset the warm section of the table. We only |
| 983 | // do this on the copy of the table that's going to be saved into the ngen image. This is important since |
| 984 | // (especially in the case of generics) we might continue to access this table throughout the rest of the |
| 985 | // save/arrange/fixup process. Leaving two copies of saved entries in the table (hot or cold plus warm) |
| 986 | // doesn't have any real impact, but removing the warm entries could be problematic where the entry was |
| 987 | // culled from the ngen image. In those cases we'll get a miss on the lookup with the result that the |
| 988 | // caller might try to add the type back to the table, something that is prohibited in the debug build |
| 989 | // during the ngen save/arrange/fixup phases. |
| 990 | |
| 991 | // Reset the warm buckets to their original size or a fairly restrictive cap. These (empty) buckets will |
| 992 | // be saved into the ngen image and form the basis for further entries added at runtime. Thus we have a |
| 993 | // trade-off between storing dead space in the ngen image and having to re-size the bucket list at |
| 994 | // runtime. Note that we can't save a zero sized bucket list: the invariant we have is that there are |
| 995 | // always a non-zero number of buckets available when we come to do an insertion (since insertions cannot |
| 996 | // fail). An alternative strategy would be to initialize these buckets at ngen image load time. |
| 997 | _ASSERTE(m_cWarmBuckets >= m_cInitialBuckets); |
| 998 | DWORD cNewWarmBuckets = min(m_cInitialBuckets, 11); |
| 999 | |
| 1000 | // Create the ngen version of the warm buckets. |
| 1001 | pImage->StoreStructure(GetWarmBuckets(), |
| 1002 | cNewWarmBuckets * sizeof(VolatileEntry*), |
| 1003 | DataImage::ITEM_NGEN_HASH_HOT); |
| 1004 | |
| 1005 | // Reset the ngen-version of the table to have no warm entries and the reduced warm bucket count. |
| 1006 | NgenHashTable<NGEN_HASH_ARGS> *pNewTable = (NgenHashTable<NGEN_HASH_ARGS>*)pImage->GetImagePointer(this); |
| 1007 | pNewTable->m_cWarmEntries = 0; |
| 1008 | pNewTable->m_cWarmBuckets = cNewWarmBuckets; |
| 1009 | |
| 1010 | // Zero-out the ngen version of the warm buckets. |
| 1011 | VolatileEntry *pNewBuckets = (VolatileEntry*)pImage->GetImagePointer(GetWarmBuckets()); |
| 1012 | memset(pNewBuckets, 0, cNewWarmBuckets * sizeof(VolatileEntry*)); |
| 1013 | } |
| 1014 | |
| 1015 | #ifdef _MSC_VER |
| 1016 | #pragma warning(pop) |
| 1017 | #endif // _MSC_VER: |
| 1018 | |
| 1019 | // Call during ngen to register fixups for hash table data structure fields. Calls derived-class |
| 1020 | // implementation of FixupEntry to allow per-entry extension of the fixup process. |
template <NGEN_HASH_PARAMS>
void NgenHashTable<NGEN_HASH_ARGS>::BaseFixup(DataImage *pImage)
{
    STANDARD_VM_CONTRACT;

    DWORD i;

    // Fixup the module pointer.
    pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pModule));

    // Throw away the heap pointer, we can't serialize it into the image. We'll rely on the loader heap
    // associated with the module above at runtime.
    pImage->ZeroPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pHeap));

    // Give the hash sub-class a chance to fixup any pointers in its entries. We provide the pointer to the
    // hot or cold entry block and the offset into that block for this entry since we don't save individual
    // zap nodes for each entry; just a single node covering the entire array. As a result all fixups have to
    // be relative to the base of this array.

    // Hot entries: one FixupEntry downcall per entry, passing the byte offset of the entry within the array.
    for (i = 0; i < m_sHotEntries.m_cEntries; i++)
        DOWNCALL(FixupEntry)(pImage,
                             &(GetPersistedHotEntries())[i].m_sValue,
                             GetPersistedHotEntries(),
                             i * sizeof(PersistedEntry));

    // Cold entries: same treatment as the hot entries above, but against the cold array.
    for (i = 0; i < m_sColdEntries.m_cEntries; i++)
        DOWNCALL(FixupEntry)(pImage,
                             &(GetPersistedColdEntries())[i].m_sValue,
                             GetPersistedColdEntries(),
                             i * sizeof(PersistedEntry));

    // Fixup the warm (empty) bucket list.
    pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pWarmBuckets));

    // Fixup the hot entry array and bucket list.
    pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) + offsetof(PersistedEntries, m_pEntries));
    pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) + offsetof(PersistedEntries, m_pBuckets));

    // Fixup the cold entry array and bucket list.
    pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) + offsetof(PersistedEntries, m_pEntries));
    pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) + offsetof(PersistedEntries, m_pBuckets));
}
| 1063 | #endif // !DACCESS_COMPILE |
| 1064 | #endif // FEATURE_PREJIT |
| 1065 | |
| 1066 | #ifdef DACCESS_COMPILE |
| 1067 | |
| 1068 | // Call during DAC enumeration of memory regions to save all hash table data structures. Calls derived-class |
| 1069 | // implementation of EnumMemoryRegionsForEntry to allow additional per-entry memory to be reported. |
template <NGEN_HASH_PARAMS>
void NgenHashTable<NGEN_HASH_ARGS>::BaseEnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    SUPPORTS_DAC;

    // Save the base data structure itself (can't use DAC_ENUM_DTHIS() since the size to save is based on a
    // sub-class).
    DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FINAL_CLASS));

    // Save the warm bucket list.
    // NOTE(review): this enumerates the bucket array before the IsValid() check below guards the per-entry
    // walk — presumably DacEnumMemoryRegion tolerates invalid addresses; confirm before reordering.
    DacEnumMemoryRegion(dac_cast<TADDR>(GetWarmBuckets()), m_cWarmBuckets * sizeof(VolatileEntry*));

    // Save all the warm entries.
    if (GetWarmBuckets().IsValid())
    {
        for (DWORD i = 0; i < m_cWarmBuckets; i++)
        {
            // Walk the chain of volatile entries hanging off this bucket, reporting each one.
            PTR_VolatileEntry pEntry = (GetWarmBuckets())[i];
            while (pEntry.IsValid())
            {
                pEntry.EnumMem();

                // Ask the sub-class whether each entry points to further data to be saved.
                DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_VOLATILE_ENTRY(pEntry), flags);

                pEntry = pEntry->m_pNextEntry;
            }
        }
    }

#ifdef FEATURE_PREJIT
    // Save hot buckets and entries.
    if (m_sHotEntries.m_cEntries > 0)
    {
        // Report the contiguous hot entry array and the optimized hot bucket list in one region each.
        DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedHotEntries()),
                            m_sHotEntries.m_cEntries * sizeof(PersistedEntry));
        DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedHotBuckets()),
                            GetPersistedHotBuckets()->GetSize(m_sHotEntries.m_cBuckets));
        for (DWORD i = 0; i < m_sHotEntries.m_cEntries; i++)
        {
            // Give the sub-class a chance to report memory reachable from each hot entry.
            PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedHotEntries())[i]);
            DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(pEntry), flags);
        }
    }

    // Save cold buckets and entries.
    if (m_sColdEntries.m_cEntries > 0)
    {
        // Mirror of the hot-section reporting above, for the cold entry array and bucket list.
        DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedColdEntries()),
                            m_sColdEntries.m_cEntries * sizeof(PersistedEntry));
        DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedColdBuckets()),
                            GetPersistedColdBuckets()->GetSize(m_sColdEntries.m_cBuckets));
        for (DWORD i = 0; i < m_sColdEntries.m_cEntries; i++)
        {
            PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedColdEntries())[i]);
            DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(pEntry), flags);
        }
    }
#endif // FEATURE_PREJIT

    // Save the module if present.
    if (GetModule().IsValid())
        GetModule()->EnumMemoryRegions(flags, true);
}
| 1134 | #endif // DACCESS_COMPILE |
| 1135 | |
| 1136 | #ifdef FEATURE_PREJIT |
| 1137 | |
| 1138 | // Find the first persisted entry (hot or cold based on pEntries) that matches the given hash. Looks only in |
| 1139 | // the persisted block given (i.e. searches only hot *or* cold entries). Returns NULL on failure. Otherwise |
| 1140 | // returns pointer to the derived class portion of the entry and initializes the provided LookupContext to |
| 1141 | // allow enumeration of any further matches. |
| 1142 | template <NGEN_HASH_PARAMS> |
| 1143 | DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindPersistedEntryByHash(PersistedEntries *pEntries, NgenHashValue iHash, LookupContext *pContext) |
| 1144 | { |
| 1145 | CONTRACTL |
| 1146 | { |
| 1147 | NOTHROW; |
| 1148 | GC_NOTRIGGER; |
| 1149 | MODE_ANY; |
| 1150 | SUPPORTS_DAC; |
| 1151 | PRECONDITION(CheckPointer(pContext)); |
| 1152 | } |
| 1153 | CONTRACTL_END; |
| 1154 | |
| 1155 | // No point looking if there are no entries. |
| 1156 | if (pEntries->m_cEntries == 0) |
| 1157 | return NULL; |
| 1158 | |
| 1159 | // Since there is at least one entry there must be at least one bucket. |
| 1160 | _ASSERTE(pEntries->m_cBuckets > 0); |
| 1161 | |
| 1162 | DWORD eType = (pEntries == &m_sHotEntries ? Hot : Cold); |
| 1163 | |
| 1164 | // Get the first entry and count of entries for the bucket which contains all entries with the given hash |
| 1165 | // code. |
| 1166 | DWORD dwEntryIndex, cEntriesLeft; |
| 1167 | if (eType == Hot) |
| 1168 | { |
| 1169 | GetPersistedHotBuckets()->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft); |
| 1170 | } |
| 1171 | else |
| 1172 | { |
| 1173 | GetPersistedColdBuckets()->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft); |
| 1174 | } |
| 1175 | |
| 1176 | // Determine the address of the first entry in the chain by indexing into the entry array. |
| 1177 | PTR_PersistedEntry pEntry; |
| 1178 | |
| 1179 | if (eType == Hot) |
| 1180 | { |
| 1181 | pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedHotEntries())[dwEntryIndex]); |
| 1182 | } |
| 1183 | else |
| 1184 | { |
| 1185 | pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedColdEntries())[dwEntryIndex]); |
| 1186 | } |
| 1187 | |
| 1188 | // Iterate while we've still got entries left to check in this chain. |
| 1189 | while (cEntriesLeft--) |
| 1190 | { |
| 1191 | if (pEntry->m_iHashValue == iHash) |
| 1192 | { |
| 1193 | // We've found our match. |
| 1194 | |
| 1195 | // Record our current search state into the provided context so that a subsequent call to |
| 1196 | // BaseFindNextEntryByHash can pick up the search where it left off. |
| 1197 | pContext->m_pEntry = dac_cast<TADDR>(pEntry); |
| 1198 | pContext->m_eType = eType; |
| 1199 | pContext->m_cRemainingEntries = cEntriesLeft; |
| 1200 | |
| 1201 | // Return the address of the sub-classes' embedded entry structure. |
| 1202 | return VALUE_FROM_PERSISTED_ENTRY(pEntry); |
| 1203 | } |
| 1204 | |
| 1205 | // Move to the next entry in the chain. |
| 1206 | pEntry++; |
| 1207 | } |
| 1208 | |
| 1209 | // If we get here then none of the entries in the target bucket matched the hash code and we have a miss |
| 1210 | // (for this section of the table at least). |
| 1211 | return NULL; |
| 1212 | } |
| 1213 | |
| 1214 | #ifndef DACCESS_COMPILE |
// Destructor: releases the entry array allocated by BaseSave (the table acts as a holder, so the
// deallocation happens automatically when BaseSave returns). delete [] on NULL is a no-op.
template <NGEN_HASH_PARAMS>
NgenHashTable<NGEN_HASH_ARGS>::EntryMappingTable::~EntryMappingTable()
{
    LIMITED_METHOD_CONTRACT;

    delete [] m_pEntries;
}
| 1222 | |
| 1223 | // Given an old entry address (pre-BaseSave) return the address of the entry relocated ready for saving to |
| 1224 | // disk. Note that this address is the (ngen) runtime address, not the disk image address you can further |
| 1225 | // obtain by calling DataImage::GetImagePointer(). |
| 1226 | template <NGEN_HASH_PARAMS> |
| 1227 | VALUE *NgenHashTable<NGEN_HASH_ARGS>::EntryMappingTable::GetNewEntryAddress(VALUE *pOldEntry) |
| 1228 | { |
| 1229 | LIMITED_METHOD_CONTRACT; |
| 1230 | |
| 1231 | // Perform a simple linear search. If this proves to be a bottleneck in ngen production (the only scenario |
| 1232 | // in which it's called) we can replace this with something faster such as a hash lookup. |
| 1233 | for (DWORD i = 0; i < m_cEntries; i++) |
| 1234 | if (m_pEntries[i].m_pOldEntry == pOldEntry) |
| 1235 | return m_pEntries[i].m_pNewEntry; |
| 1236 | |
| 1237 | _ASSERTE(!"Couldn't map old hash entry to new entry" ); |
| 1238 | return NULL; |
| 1239 | } |
| 1240 | #endif // !DACCESS_COMPILE |
| 1241 | #endif // FEATURE_PREJIT |
| 1242 | |
| 1243 | // Find the first volatile (warm) entry that matches the given hash. Looks only at warm entries. Returns NULL |
| 1244 | // on failure. Otherwise returns pointer to the derived class portion of the entry and initializes the |
| 1245 | // provided LookupContext to allow enumeration of any further matches. |
| 1246 | template <NGEN_HASH_PARAMS> |
| 1247 | DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindVolatileEntryByHash(NgenHashValue iHash, LookupContext *pContext) |
| 1248 | { |
| 1249 | CONTRACTL |
| 1250 | { |
| 1251 | NOTHROW; |
| 1252 | GC_NOTRIGGER; |
| 1253 | MODE_ANY; |
| 1254 | SUPPORTS_DAC; |
| 1255 | PRECONDITION(CheckPointer(pContext)); |
| 1256 | } |
| 1257 | CONTRACTL_END; |
| 1258 | |
| 1259 | // No point looking if there are no entries. |
| 1260 | if (m_cWarmEntries == 0) |
| 1261 | return NULL; |
| 1262 | |
| 1263 | // Since there is at least one entry there must be at least one bucket. |
| 1264 | _ASSERTE(m_cWarmBuckets > 0); |
| 1265 | |
| 1266 | // Point at the first entry in the bucket chain which would contain any entries with the given hash code. |
| 1267 | PTR_VolatileEntry pEntry = (GetWarmBuckets())[iHash % m_cWarmBuckets]; |
| 1268 | |
| 1269 | // Walk the bucket chain one entry at a time. |
| 1270 | while (pEntry) |
| 1271 | { |
| 1272 | if (pEntry->m_iHashValue == iHash) |
| 1273 | { |
| 1274 | // We've found our match. |
| 1275 | |
| 1276 | // Record our current search state into the provided context so that a subsequent call to |
| 1277 | // BaseFindNextEntryByHash can pick up the search where it left off. |
| 1278 | pContext->m_pEntry = dac_cast<TADDR>(pEntry); |
| 1279 | pContext->m_eType = Warm; |
| 1280 | |
| 1281 | // Return the address of the sub-classes' embedded entry structure. |
| 1282 | return VALUE_FROM_VOLATILE_ENTRY(pEntry); |
| 1283 | } |
| 1284 | |
| 1285 | // Move to the next entry in the chain. |
| 1286 | pEntry = pEntry->m_pNextEntry; |
| 1287 | } |
| 1288 | |
| 1289 | // If we get here then none of the entries in the target bucket matched the hash code and we have a miss |
| 1290 | // (for this section of the table at least). |
| 1291 | return NULL; |
| 1292 | } |
| 1293 | |
| 1294 | // Initializes the iterator context passed by the caller to make it ready to walk every entry in the table in |
| 1295 | // an arbitrary order. Call pIterator->Next() to retrieve the first entry. |
| 1296 | template <NGEN_HASH_PARAMS> |
| 1297 | void NgenHashTable<NGEN_HASH_ARGS>::BaseInitIterator(BaseIterator *pIterator) |
| 1298 | { |
| 1299 | LIMITED_METHOD_DAC_CONTRACT; |
| 1300 | |
| 1301 | pIterator->m_pTable = dac_cast<DPTR(NgenHashTable<NGEN_HASH_ARGS>)>(this); |
| 1302 | pIterator->m_pEntry = NULL; |
| 1303 | #ifdef FEATURE_PREJIT |
| 1304 | pIterator->m_eType = Hot; |
| 1305 | pIterator->m_cRemainingEntries = m_sHotEntries.m_cEntries; |
| 1306 | #else |
| 1307 | pIterator->m_eType = Warm; |
| 1308 | pIterator->m_dwBucket = 0; |
| 1309 | #endif |
| 1310 | } |
| 1311 | |
| 1312 | // Returns a pointer to the next entry in the hash table or NULL once all entries have been enumerated. Once |
| 1313 | // NULL has been return the only legal operation is to re-initialize the iterator with BaseInitIterator. |
| 1314 | template <NGEN_HASH_PARAMS> |
| 1315 | DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next() |
| 1316 | { |
| 1317 | CONTRACTL |
| 1318 | { |
| 1319 | NOTHROW; |
| 1320 | GC_NOTRIGGER; |
| 1321 | MODE_ANY; |
| 1322 | SUPPORTS_DAC; |
| 1323 | } |
| 1324 | CONTRACTL_END; |
| 1325 | |
| 1326 | // We might need to re-iterate our algorithm if we fall off the end of one hash table section (Hot or |
| 1327 | // Warm) and need to move onto the next. |
| 1328 | while (true) |
| 1329 | { |
| 1330 | // What type of section are we walking (Hot, Warm or Cold)? |
| 1331 | switch (m_eType) |
| 1332 | { |
| 1333 | #ifdef FEATURE_PREJIT |
| 1334 | case Hot: |
| 1335 | { |
| 1336 | if (m_cRemainingEntries) |
| 1337 | { |
| 1338 | // There's at least one more entry in the hot section to report. |
| 1339 | |
| 1340 | if (m_pEntry == NULL) |
| 1341 | { |
| 1342 | // This is our first lookup in the hot section, return the first entry in the hot array. |
| 1343 | m_pEntry = dac_cast<TADDR>(m_pTable->GetPersistedHotEntries()); |
| 1344 | } |
| 1345 | else |
| 1346 | { |
| 1347 | // This is not our first lookup, return the entry immediately after the last one we |
| 1348 | // reported. |
| 1349 | m_pEntry = (TADDR)(m_pEntry + sizeof(PersistedEntry)); |
| 1350 | } |
| 1351 | |
| 1352 | // There's one less entry to report in the future. |
| 1353 | m_cRemainingEntries--; |
| 1354 | |
| 1355 | // Return the pointer to the embedded sub-class entry in the entry we found. |
| 1356 | return VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(m_pEntry)); |
| 1357 | } |
| 1358 | |
| 1359 | // We ran out of hot entries. Set up to search the warm section next and go round the loop again. |
| 1360 | m_eType = Warm; |
| 1361 | m_pEntry = NULL; |
| 1362 | m_dwBucket = 0; |
| 1363 | break; |
| 1364 | } |
| 1365 | #endif // FEATURE_PREJIT |
| 1366 | |
| 1367 | case Warm: |
| 1368 | { |
| 1369 | if (m_pEntry == NULL) |
| 1370 | { |
| 1371 | // This is our first lookup in the warm section for a particular bucket, return the first |
| 1372 | // entry in that bucket. |
| 1373 | m_pEntry = dac_cast<TADDR>((m_pTable->GetWarmBuckets())[m_dwBucket]); |
| 1374 | } |
| 1375 | else |
| 1376 | { |
| 1377 | // This is not our first lookup, return the entry immediately after the last one we |
| 1378 | // reported. |
| 1379 | m_pEntry = dac_cast<TADDR>(dac_cast<PTR_VolatileEntry>(m_pEntry)->m_pNextEntry); |
| 1380 | } |
| 1381 | |
| 1382 | // If we found an entry in the last step return with it. |
| 1383 | if (m_pEntry) |
| 1384 | return VALUE_FROM_VOLATILE_ENTRY(dac_cast<PTR_VolatileEntry>(m_pEntry)); |
| 1385 | |
            // Otherwise we found the end of a bucket chain. Increment the current bucket and, if there
            // are buckets left to scan, go back around again.
| 1388 | m_dwBucket++; |
| 1389 | if (m_dwBucket < m_pTable->m_cWarmBuckets) |
| 1390 | break; |
| 1391 | |
            // Otherwise we should move onto the cold section (if we have one).
| 1393 | |
| 1394 | #ifdef FEATURE_PREJIT |
| 1395 | m_eType = Cold; |
| 1396 | m_pEntry = NULL; |
| 1397 | m_cRemainingEntries = m_pTable->m_sColdEntries.m_cEntries; |
| 1398 | break; |
| 1399 | #else |
| 1400 | return NULL; |
| 1401 | #endif // FEATURE_PREJIT |
| 1402 | } |
| 1403 | |
| 1404 | #ifdef FEATURE_PREJIT |
| 1405 | case Cold: |
| 1406 | { |
| 1407 | if (m_cRemainingEntries) |
| 1408 | { |
| 1409 | // There's at least one more entry in the cold section to report. |
| 1410 | |
| 1411 | if (m_pEntry == NULL) |
| 1412 | { |
| 1413 | // This is our first lookup in the cold section, return the first entry in the cold array. |
| 1414 | m_pEntry = dac_cast<TADDR>(m_pTable->GetPersistedColdEntries()); |
| 1415 | } |
| 1416 | else |
| 1417 | { |
| 1418 | // This is not our first lookup, return the entry immediately after the last one we |
| 1419 | // reported. |
| 1420 | m_pEntry = (TADDR)(m_pEntry + sizeof(PersistedEntry)); |
| 1421 | } |
| 1422 | |
| 1423 | // There's one less entry to report in the future. |
| 1424 | m_cRemainingEntries--; |
| 1425 | |
| 1426 | // Return the pointer to the embedded sub-class entry in the entry we found. |
| 1427 | return VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(m_pEntry)); |
| 1428 | } |
| 1429 | |
| 1430 | // If there are no more entries in the cold section that's it, the whole table has been scanned. |
| 1431 | return NULL; |
| 1432 | } |
| 1433 | #endif // FEATURE_PREJIT |
| 1434 | |
| 1435 | default: |
| 1436 | _ASSERTE(!"Invalid hash entry type" ); |
| 1437 | } |
| 1438 | } |
| 1439 | } |
| 1440 | |
// Get a pointer to the entry this reference points at, or NULL if the reference was never set (or was
// set to NULL). The stored value is relative to the address of this reference field itself, so resolving
// it requires computing the TADDR of m_rpEntryRef first. Safe to call from DAC builds (SUPPORTS_DAC).
template <NGEN_HASH_PARAMS>
DPTR(VALUE) NgenHashEntryRef<NGEN_HASH_ARGS>::Get()
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
        SUPPORTS_DAC;
    }
    CONTRACTL_END;

    // Short-cut the NULL case, it's a lot cheaper than the code below when compiling for DAC.
    if (m_rpEntryRef.IsNull())
        return NULL;

    // Note that the following code uses a special DAC lookup for an interior pointer (i.e. "this" isn't a
    // host address corresponding to a DAC marshalled instance, it's some host address within such an
    // instance). These lookups are a little slower than the regular kind since we have to search for the
    // containing instance.

    // @todo: The following causes gcc to choke on Mac 10.4 at least (complains that offsetof is being passed
    // four arguments instead of two). Expanding the top-level macro manually fixes this.
    // TADDR pBase = PTR_HOST_INT_MEMBER_TADDR(NgenHashEntryRef<NGEN_HASH_ARGS>, this, m_rpEntryRef);
    TADDR pBase = PTR_HOST_INT_TO_TADDR(this) + (TADDR)offsetof(NgenHashEntryRef<NGEN_HASH_ARGS>, m_rpEntryRef);

    // Resolve the relative pointer against the target address of the field that stores it.
    return m_rpEntryRef.GetValue(pBase);
}
| 1470 | |
| 1471 | #ifndef DACCESS_COMPILE |
| 1472 | |
// Set the reference to point to the given entry. A NULL pEntry is legal and clears the reference (hence
// the SetValueMaybeNull call below). Runtime-only: this mutator is compiled out of DAC builds.
template <NGEN_HASH_PARAMS>
void NgenHashEntryRef<NGEN_HASH_ARGS>::Set(VALUE *pEntry)
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        MODE_ANY;
    }
    CONTRACTL_END;

    // The holder stores the address in relative form; it handles the NULL case itself.
    m_rpEntryRef.SetValueMaybeNull(pEntry);
}
| 1487 | |
#ifdef FEATURE_PREJIT

// Call this during the ngen Fixup phase to adjust the relative pointer to account for ngen image layout.
//
// Both the field holding this reference and the entry it targets must lie within one of the hash's
// persisted entry arrays (hot or cold); anything else indicates a caller error and trips an assert.
//
//  pImage : the ngen data image being fixed up.
//  pTable : the hash table whose persisted entry arrays contain both this field and its target.
template <NGEN_HASH_PARAMS>
void NgenHashEntryRef<NGEN_HASH_ARGS>::Fixup(DataImage *pImage, NgenHashTable<NGEN_HASH_ARGS> *pTable)
{
    STANDARD_VM_CONTRACT;

    // No fixup required for null pointers.
    if (m_rpEntryRef.IsNull())
        return;

    // The same section-resolution logic is needed twice below (once for the location of this field, once
    // for the entry it targets): map an address to the base of the persisted entry array (hot or cold)
    // that contains it, or NULL if the address lies in neither array.
    auto pfnFindSectionBase = [pTable](BYTE *pAddr) -> BYTE *
    {
        BYTE *pHotBase = (BYTE*)pTable->GetPersistedHotEntries();
        if (pAddr >= pHotBase &&
            pAddr < (BYTE*)(pTable->GetPersistedHotEntries() + pTable->m_sHotEntries.m_cEntries))
            return pHotBase;

        BYTE *pColdBase = (BYTE*)pTable->GetPersistedColdEntries();
        if (pAddr >= pColdBase &&
            pAddr < (BYTE*)(pTable->GetPersistedColdEntries() + pTable->m_sColdEntries.m_cEntries))
            return pColdBase;

        return NULL;
    };

    // Location is the field containing the entry reference. We need to determine the ngen zap node that
    // contains this field (it'll be part of either the hot or cold entry arrays). Then we can determine the
    // offset of the field from the beginning of the node.
    BYTE *pLocation = (BYTE*)&m_rpEntryRef;
    BYTE *pLocationBase = pfnFindSectionBase(pLocation);
    if (pLocationBase == NULL)
    {
        // The field doesn't lie in one of the entry arrays. The caller has passed us an NgenHashEntryRef that
        // wasn't embedded as a field in one of this hash's entries.
        _ASSERTE(!"NgenHashEntryRef must be a field in an NgenHashTable entry for Fixup to work" );
        return;
    }
    DWORD cbLocationOffset = static_cast<DWORD>(pLocation - pLocationBase);

    // Target is the address of the entry that this reference points to. Go through the same kind of logic
    // to determine which section the target entry lives in, hot or cold.
    BYTE *pTarget = (BYTE*)m_rpEntryRef.GetValue();
    BYTE *pTargetBase = pfnFindSectionBase(pTarget);
    if (pTargetBase == NULL)
    {
        // The target doesn't lie in one of the entry arrays. The caller has passed us an NgenHashEntryRef that
        // points to an entry (or other memory) not in our hash table.
        _ASSERTE(!"NgenHashEntryRef must refer to an entry in the same hash table" );
        return;
    }
    DWORD cbTargetOffset = static_cast<DWORD>(pTarget - pTargetBase);

    // Now we have enough data to ask for a fixup to be generated for this field. The fixup type
    // IMAGE_REL_BASED_RELPTR means we won't actually get a base relocation fixup (an entry in the ngen image
    // that causes a load-time fixup to be applied). Instead this record will just adjust the relative value
    // in the field once the ngen image layout is finalized and it knows the final locations of the field and
    // target zap nodes.
    pImage->FixupField(pLocationBase, cbLocationOffset, pTargetBase, cbTargetOffset, IMAGE_REL_BASED_RELPTR);
}
#endif // FEATURE_PREJIT
| 1563 | #endif // !DACCESS_COMPILE |
| 1564 | |