1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | |
6 | |
7 | #include "common.h" |
8 | |
9 | #ifdef FEATURE_PREJIT |
10 | |
11 | #include "dataimage.h" |
12 | #include "compile.h" |
13 | |
14 | #include "field.h" |
15 | |
16 | // |
17 | // Include Zapper infrastructure here |
18 | // |
19 | // dataimage.cpp is the only place where Zapper infrasture should be used directly in the VM. |
20 | // The rest of the VM should never use Zapper infrastructure directly for good layering. |
21 | // The long term goal is to move all NGen specific parts like Save and Fixup methods out of the VM, |
22 | // and remove the dataimage.cpp completely. |
23 | // |
24 | #include "zapper.h" |
25 | #include "../zap/zapwriter.h" |
26 | #include "../zap/zapimage.h" |
27 | #include "../zap/zapimport.h" |
28 | #include "inlinetracking.h" |
29 | |
30 | #define NodeTypeForItemKind(kind) ((ZapNodeType)(ZapNodeType_StoredStructure + kind)) |
31 | |
32 | class ZapStoredStructure : public ZapNode |
33 | { |
34 | DWORD m_dwSize; |
35 | BYTE m_kind; |
36 | BYTE m_align; |
37 | |
38 | public: |
39 | ZapStoredStructure(DWORD dwSize, BYTE kind, BYTE align) |
40 | : m_dwSize(dwSize), m_kind(kind), m_align(align) |
41 | { |
42 | } |
43 | |
44 | void * GetData() |
45 | { |
46 | return this + 1; |
47 | } |
48 | |
49 | DataImage::ItemKind GetKind() |
50 | { |
51 | return (DataImage::ItemKind)m_kind; |
52 | } |
53 | |
54 | virtual DWORD GetSize() |
55 | { |
56 | return m_dwSize; |
57 | } |
58 | |
59 | virtual UINT GetAlignment() |
60 | { |
61 | return m_align; |
62 | } |
63 | |
64 | virtual ZapNodeType GetType() |
65 | { |
66 | return NodeTypeForItemKind(m_kind); |
67 | } |
68 | |
69 | virtual void Save(ZapWriter * pZapWriter); |
70 | }; |
71 | |
72 | inline ZapStoredStructure * AsStoredStructure(ZapNode * pNode) |
73 | { |
74 | // Verify that it is one of the StoredStructure subtypes |
75 | _ASSERTE(pNode->GetType() >= ZapNodeType_StoredStructure); |
76 | return (ZapStoredStructure *)pNode; |
77 | } |
78 | |
79 | struct InternedStructureKey |
80 | { |
81 | InternedStructureKey(const void * data, DWORD dwSize, DataImage::ItemKind kind) |
82 | : m_data(data), m_dwSize(dwSize), m_kind(kind) |
83 | { |
84 | } |
85 | |
86 | const void *m_data; |
87 | DWORD m_dwSize; |
88 | DataImage::ItemKind m_kind; |
89 | }; |
90 | |
91 | class InternedStructureTraits : public NoRemoveSHashTraits< DefaultSHashTraits<ZapStoredStructure *> > |
92 | { |
93 | public: |
94 | typedef InternedStructureKey key_t; |
95 | |
96 | static key_t GetKey(element_t e) |
97 | { |
98 | LIMITED_METHOD_CONTRACT; |
99 | return InternedStructureKey(e->GetData(), e->GetSize(), e->GetKind()); |
100 | } |
101 | static BOOL Equals(key_t k1, key_t k2) |
102 | { |
103 | LIMITED_METHOD_CONTRACT; |
104 | return (k1.m_dwSize == k2.m_dwSize) && |
105 | (k1.m_kind == k2.m_kind) && |
106 | memcmp(k1.m_data, k2.m_data, k1.m_dwSize) == 0; |
107 | } |
108 | static count_t Hash(key_t k) |
109 | { |
110 | LIMITED_METHOD_CONTRACT; |
111 | return (count_t)k.m_dwSize ^ (count_t)k.m_kind ^ HashBytes((BYTE *)k.m_data, k.m_dwSize); |
112 | } |
113 | |
114 | static element_t Null() { LIMITED_METHOD_CONTRACT; return NULL; } |
115 | static bool IsNull(const element_t &e) { LIMITED_METHOD_CONTRACT; return e == NULL; } |
116 | }; |
117 | |
118 | DataImage::DataImage(Module *module, CEEPreloader *preloader) |
119 | : m_module(module), |
120 | m_preloader(preloader), |
121 | m_iCurrentFixup(0), // Dev11 bug 181494 instrumentation |
122 | m_pInternedStructures(NULL), |
123 | m_pCurrentAssociatedMethodTable(NULL) |
124 | { |
125 | m_pZapImage = m_preloader->GetDataStore()->GetZapImage(); |
126 | m_pZapImage->m_pDataImage = this; |
127 | |
128 | m_pInternedStructures = new InternedStructureHashTable(); |
129 | m_inlineTrackingMap = new InlineTrackingMap(); |
130 | } |
131 | |
132 | DataImage::~DataImage() |
133 | { |
134 | delete m_pInternedStructures; |
135 | delete m_inlineTrackingMap; |
136 | } |
137 | |
138 | void DataImage::PreSave() |
139 | { |
140 | #ifndef ZAP_HASHTABLE_TUNING |
141 | Preallocate(); |
142 | #endif |
143 | } |
144 | |
145 | void DataImage::PostSave() |
146 | { |
147 | #ifdef ZAP_HASHTABLE_TUNING |
148 | // If ZAP_HASHTABLE_TUNING is defined, preallocate is overloaded to print the tunning constants |
149 | Preallocate(); |
150 | #endif |
151 | } |
152 | |
153 | DWORD DataImage::GetMethodProfilingFlags(MethodDesc * pMD) |
154 | { |
155 | STANDARD_VM_CONTRACT; |
156 | |
157 | // We are not differentiating unboxing stubs vs. normal method descs in IBC data yet |
158 | if (pMD->IsUnboxingStub()) |
159 | pMD = pMD->GetWrappedMethodDesc(); |
160 | |
161 | const MethodProfilingData * pData = m_methodProfilingData.LookupPtr(pMD); |
162 | return (pData != NULL) ? pData->flags : 0; |
163 | } |
164 | |
165 | void DataImage::SetMethodProfilingFlags(MethodDesc * pMD, DWORD flags) |
166 | { |
167 | STANDARD_VM_CONTRACT; |
168 | |
169 | const MethodProfilingData * pData = m_methodProfilingData.LookupPtr(pMD); |
170 | if (pData != NULL) |
171 | { |
172 | const_cast<MethodProfilingData *>(pData)->flags |= flags; |
173 | return; |
174 | } |
175 | |
176 | MethodProfilingData data; |
177 | data.pMD = pMD; |
178 | data.flags = flags; |
179 | m_methodProfilingData.Add(data); |
180 | } |
181 | |
182 | void DataImage::Preallocate() |
183 | { |
184 | STANDARD_VM_CONTRACT; |
185 | |
186 | // TODO: Move to ZapImage |
187 | |
188 | PEDecoder pe((void *)m_module->GetFile()->GetManagedFileContents()); |
189 | |
190 | COUNT_T cbILImage = pe.GetSize(); |
191 | |
192 | // Curb the estimate to handle corner cases gracefuly |
193 | cbILImage = min(cbILImage, 50000000); |
194 | |
195 | PREALLOCATE_HASHTABLE(DataImage::m_structures, 0.019, cbILImage); |
196 | PREALLOCATE_ARRAY(DataImage::m_structuresInOrder, 0.0088, cbILImage); |
197 | PREALLOCATE_ARRAY(DataImage::m_Fixups, 0.046, cbILImage); |
198 | PREALLOCATE_HASHTABLE(DataImage::m_surrogates, 0.0025, cbILImage); |
199 | PREALLOCATE_HASHTABLE((*DataImage::m_pInternedStructures), 0.0007, cbILImage); |
200 | } |
201 | |
202 | ZapHeap * DataImage::GetHeap() |
203 | { |
204 | LIMITED_METHOD_CONTRACT; |
205 | return m_pZapImage->GetHeap(); |
206 | } |
207 | |
208 | void DataImage::AddStructureInOrder(ZapNode *pNode, BOOL fMaintainSaveOrder /*=FALSE*/) |
209 | { |
210 | WRAPPER_NO_CONTRACT; |
211 | |
212 | SavedNodeEntry entry; |
213 | entry.pNode = pNode; |
214 | entry.dwAssociatedOrder = 0; |
215 | |
216 | if (fMaintainSaveOrder) |
217 | { |
218 | entry.dwAssociatedOrder = MAINTAIN_SAVE_ORDER; |
219 | } |
220 | else if (m_pCurrentAssociatedMethodTable) |
221 | { |
222 | TypeHandle th = TypeHandle(m_pCurrentAssociatedMethodTable); |
223 | entry.dwAssociatedOrder = m_pZapImage->LookupClassLayoutOrder(CORINFO_CLASS_HANDLE(th.AsPtr())); |
224 | } |
225 | |
226 | m_structuresInOrder.Append(entry); |
227 | } |
228 | |
229 | ZapStoredStructure * DataImage::StoreStructureHelper(const void *data, SIZE_T size, |
230 | DataImage::ItemKind kind, |
231 | int align, |
232 | BOOL fMaintainSaveOrder) |
233 | { |
234 | STANDARD_VM_CONTRACT; |
235 | |
236 | S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapStoredStructure)) + S_SIZE_T(size); |
237 | if(cbAllocSize.IsOverflow()) |
238 | ThrowHR(COR_E_OVERFLOW); |
239 | |
240 | void * pMemory = new (GetHeap()) BYTE[cbAllocSize.Value()]; |
241 | |
242 | // PE files cannot be larger than 4 GB |
243 | if (DWORD(size) != size) |
244 | ThrowHR(E_UNEXPECTED); |
245 | |
246 | ZapStoredStructure * pStructure = new (pMemory) ZapStoredStructure((DWORD)size, static_cast<BYTE>(kind), static_cast<BYTE>(align)); |
247 | |
248 | if (data != NULL) |
249 | { |
250 | CopyMemory(pStructure->GetData(), data, size); |
251 | BindPointer(data, pStructure, 0); |
252 | } |
253 | |
254 | m_pLastLookup = NULL; |
255 | |
256 | AddStructureInOrder(pStructure, fMaintainSaveOrder); |
257 | |
258 | return pStructure; |
259 | } |
260 | |
261 | // Bind pointer to the relative offset in ZapNode |
262 | void DataImage::BindPointer(const void *p, ZapNode * pNode, SSIZE_T offset) |
263 | { |
264 | STANDARD_VM_CONTRACT; |
265 | |
266 | _ASSERTE(m_structures.LookupPtr(p) == NULL); |
267 | |
268 | StructureEntry e; |
269 | e.ptr = p; |
270 | e.pNode = pNode; |
271 | e.offset = offset; |
272 | m_structures.Add(e); |
273 | |
274 | m_pLastLookup = NULL; |
275 | } |
276 | |
277 | void DataImage::CopyData(ZapStoredStructure * pNode, const void * p, ULONG size) |
278 | { |
279 | memcpy(pNode->GetData(), p, size); |
280 | } |
281 | |
282 | void DataImage::CopyDataToOffset(ZapStoredStructure * pNode, ULONG offset, const void * p, ULONG size) |
283 | { |
284 | SIZE_T target = (SIZE_T) (pNode->GetData()); |
285 | target += offset; |
286 | |
287 | memcpy((void *) target, p, size); |
288 | } |
289 | |
290 | void DataImage::PlaceStructureForAddress(const void * data, CorCompileSection section) |
291 | { |
292 | STANDARD_VM_CONTRACT; |
293 | |
294 | if (data == NULL) |
295 | return; |
296 | |
297 | const StructureEntry * pEntry = m_structures.LookupPtr(data); |
298 | if (pEntry == NULL) |
299 | return; |
300 | |
301 | ZapNode * pNode = pEntry->pNode; |
302 | if (!pNode->IsPlaced()) |
303 | { |
304 | ZapVirtualSection * pSection = m_pZapImage->GetSection(section); |
305 | pSection->Place(pNode); |
306 | } |
307 | } |
308 | |
309 | void DataImage::PlaceInternedStructureForAddress(const void * data, CorCompileSection sectionIfReused, CorCompileSection sectionIfSingleton) |
310 | { |
311 | STANDARD_VM_CONTRACT; |
312 | |
313 | if (data == NULL) |
314 | return; |
315 | |
316 | const StructureEntry * pEntry = m_structures.LookupPtr(data); |
317 | if (pEntry == NULL) |
318 | return; |
319 | |
320 | ZapNode * pNode = pEntry->pNode; |
321 | if (!pNode->IsPlaced()) |
322 | { |
323 | CorCompileSection section = m_reusedStructures.Contains(pNode) ? sectionIfReused : sectionIfSingleton; |
324 | ZapVirtualSection * pSection = m_pZapImage->GetSection(section); |
325 | pSection->Place(pNode); |
326 | } |
327 | } |
328 | |
329 | void DataImage::FixupPointerField(PVOID p, SSIZE_T offset) |
330 | { |
331 | STANDARD_VM_CONTRACT; |
332 | |
333 | PVOID pTarget = *(PVOID UNALIGNED *)((BYTE *)p + offset); |
334 | |
335 | if (pTarget == NULL) |
336 | { |
337 | ZeroPointerField(p, offset); |
338 | return; |
339 | } |
340 | |
341 | FixupField(p, offset, pTarget); |
342 | } |
343 | |
344 | void DataImage::FixupRelativePointerField(PVOID p, SSIZE_T offset) |
345 | { |
346 | STANDARD_VM_CONTRACT; |
347 | |
348 | PVOID pTarget = RelativePointer<PTR_VOID>::GetValueMaybeNullAtPtr((TADDR)p + offset); |
349 | |
350 | if (pTarget == NULL) |
351 | { |
352 | ZeroPointerField(p, offset); |
353 | return; |
354 | } |
355 | |
356 | FixupField(p, offset, pTarget, 0, IMAGE_REL_BASED_RELPTR); |
357 | } |
358 | |
359 | static void EncodeTargetOffset(PVOID pLocation, SSIZE_T targetOffset, ZapRelocationType type) |
360 | { |
361 | // Store the targetOffset into the location of the reloc temporarily |
362 | switch (type) |
363 | { |
364 | case IMAGE_REL_BASED_PTR: |
365 | case IMAGE_REL_BASED_RELPTR: |
366 | *(UNALIGNED TADDR *)pLocation = (TADDR)targetOffset; |
367 | break; |
368 | |
369 | case IMAGE_REL_BASED_ABSOLUTE: |
370 | *(UNALIGNED DWORD *)pLocation = (DWORD)targetOffset; |
371 | break; |
372 | |
373 | case IMAGE_REL_BASED_ABSOLUTE_TAGGED: |
374 | _ASSERTE(targetOffset == 0); |
375 | *(UNALIGNED TADDR *)pLocation = 0; |
376 | break; |
377 | |
378 | #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) |
379 | case IMAGE_REL_BASED_REL32: |
380 | *(UNALIGNED INT32 *)pLocation = (INT32)targetOffset; |
381 | break; |
382 | #endif // _TARGET_X86_ || _TARGET_AMD64_ |
383 | |
384 | default: |
385 | _ASSERTE(0); |
386 | } |
387 | } |
388 | |
389 | static SSIZE_T DecodeTargetOffset(PVOID pLocation, ZapRelocationType type) |
390 | { |
391 | // Store the targetOffset into the location of the reloc temporarily |
392 | switch (type) |
393 | { |
394 | case IMAGE_REL_BASED_PTR: |
395 | case IMAGE_REL_BASED_RELPTR: |
396 | return (SSIZE_T)*(UNALIGNED TADDR *)pLocation; |
397 | |
398 | case IMAGE_REL_BASED_ABSOLUTE: |
399 | return *(UNALIGNED DWORD *)pLocation; |
400 | |
401 | case IMAGE_REL_BASED_ABSOLUTE_TAGGED: |
402 | _ASSERTE(*(UNALIGNED TADDR *)pLocation == 0); |
403 | return 0; |
404 | |
405 | #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) |
406 | case IMAGE_REL_BASED_REL32: |
407 | return *(UNALIGNED INT32 *)pLocation; |
408 | #endif // _TARGET_X86_ || _TARGET_AMD64_ |
409 | |
410 | default: |
411 | _ASSERTE(0); |
412 | return 0; |
413 | } |
414 | } |
415 | |
416 | void DataImage::FixupField(PVOID p, SSIZE_T offset, PVOID pTarget, SSIZE_T targetOffset, ZapRelocationType type) |
417 | { |
418 | STANDARD_VM_CONTRACT; |
419 | |
420 | m_iCurrentFixup++; // Dev11 bug 181494 instrumentation |
421 | |
422 | const StructureEntry * pEntry = m_pLastLookup; |
423 | if (pEntry == NULL || pEntry->ptr != p) |
424 | { |
425 | pEntry = m_structures.LookupPtr(p); |
426 | _ASSERTE(pEntry != NULL && |
427 | "StoreStructure or BindPointer have to be called on all save data." ); |
428 | m_pLastLookup = pEntry; |
429 | } |
430 | offset += pEntry->offset; |
431 | _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize()); |
432 | |
433 | const StructureEntry * pTargetEntry = m_pLastLookup; |
434 | if (pTargetEntry == NULL || pTargetEntry->ptr != pTarget) |
435 | { |
436 | pTargetEntry = m_structures.LookupPtr(pTarget); |
437 | |
438 | _ASSERTE(pTargetEntry != NULL && |
439 | "The target of the fixup is not saved into the image" ); |
440 | } |
441 | targetOffset += pTargetEntry->offset; |
442 | _ASSERTE(0 <= targetOffset && (DWORD)targetOffset <= pTargetEntry->pNode->GetSize()); |
443 | |
444 | FixupEntry entry; |
445 | entry.m_type = type; |
446 | entry.m_offset = (DWORD)offset; |
447 | entry.m_pLocation = AsStoredStructure(pEntry->pNode); |
448 | entry.m_pTargetNode = pTargetEntry->pNode; |
449 | AppendFixup(entry); |
450 | |
451 | EncodeTargetOffset((BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset, targetOffset, type); |
452 | } |
453 | |
454 | void DataImage::FixupFieldToNode(PVOID p, SSIZE_T offset, ZapNode * pTarget, SSIZE_T targetOffset, ZapRelocationType type) |
455 | { |
456 | STANDARD_VM_CONTRACT; |
457 | |
458 | m_iCurrentFixup++; // Dev11 bug 181494 instrumentation |
459 | |
460 | const StructureEntry * pEntry = m_pLastLookup; |
461 | if (pEntry == NULL || pEntry->ptr != p) |
462 | { |
463 | pEntry = m_structures.LookupPtr(p); |
464 | _ASSERTE(pEntry != NULL && |
465 | "StoreStructure or BindPointer have to be called on all save data." ); |
466 | m_pLastLookup = pEntry; |
467 | } |
468 | offset += pEntry->offset; |
469 | _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize()); |
470 | |
471 | _ASSERTE(pTarget != NULL); |
472 | |
473 | FixupEntry entry; |
474 | entry.m_type = type; |
475 | entry.m_offset = (DWORD)offset; |
476 | entry.m_pLocation = AsStoredStructure(pEntry->pNode); |
477 | entry.m_pTargetNode = pTarget; |
478 | AppendFixup(entry); |
479 | |
480 | EncodeTargetOffset((BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset, targetOffset, type); |
481 | } |
482 | |
483 | DWORD DataImage::GetRVA(const void *data) |
484 | { |
485 | STANDARD_VM_CONTRACT; |
486 | |
487 | const StructureEntry * pEntry = m_structures.LookupPtr(data); |
488 | _ASSERTE(pEntry != NULL); |
489 | |
490 | return pEntry->pNode->GetRVA() + (DWORD)pEntry->offset; |
491 | } |
492 | |
493 | void DataImage::ZeroField(PVOID p, SSIZE_T offset, SIZE_T size) |
494 | { |
495 | STANDARD_VM_CONTRACT; |
496 | |
497 | ZeroMemory(GetImagePointer(p, offset), size); |
498 | } |
499 | |
500 | void * DataImage::GetImagePointer(ZapStoredStructure * pNode) |
501 | { |
502 | return pNode->GetData(); |
503 | } |
504 | |
505 | void * DataImage::GetImagePointer(PVOID p, SSIZE_T offset) |
506 | { |
507 | STANDARD_VM_CONTRACT; |
508 | |
509 | const StructureEntry * pEntry = m_pLastLookup; |
510 | if (pEntry == NULL || pEntry->ptr != p) |
511 | { |
512 | pEntry = m_structures.LookupPtr(p); |
513 | _ASSERTE(pEntry != NULL && |
514 | "StoreStructure or BindPointer have to be called on all save data." ); |
515 | m_pLastLookup = pEntry; |
516 | } |
517 | offset += pEntry->offset; |
518 | _ASSERTE(0 <= offset && (DWORD)offset < pEntry->pNode->GetSize()); |
519 | |
520 | return (BYTE *)AsStoredStructure(pEntry->pNode)->GetData() + offset; |
521 | } |
522 | |
523 | ZapNode * DataImage::GetNodeForStructure(PVOID p, SSIZE_T * pOffset) |
524 | { |
525 | const StructureEntry * pEntry = m_pLastLookup; |
526 | if (pEntry == NULL || pEntry->ptr != p) |
527 | { |
528 | pEntry = m_structures.LookupPtr(p); |
529 | _ASSERTE(pEntry != NULL && |
530 | "StoreStructure or BindPointer have to be called on all save data." ); |
531 | } |
532 | *pOffset = pEntry->offset; |
533 | return pEntry->pNode; |
534 | } |
535 | |
536 | ZapStoredStructure * DataImage::StoreInternedStructure(const void *data, ULONG size, |
537 | DataImage::ItemKind kind, |
538 | int align) |
539 | { |
540 | STANDARD_VM_CONTRACT; |
541 | |
542 | ZapStoredStructure * pStructure = m_pInternedStructures->Lookup(InternedStructureKey(data, size, kind)); |
543 | |
544 | if (pStructure != NULL) |
545 | { |
546 | // Just add a new mapping for to the interned structure |
547 | BindPointer(data, pStructure, 0); |
548 | |
549 | // Track that this structure has been successfully reused by interning |
550 | NoteReusedStructure(data); |
551 | } |
552 | else |
553 | { |
554 | // We have not seen this structure yet. Create a new one. |
555 | pStructure = StoreStructure(data, size, kind); |
556 | m_pInternedStructures->Add(pStructure); |
557 | } |
558 | |
559 | return pStructure; |
560 | } |
561 | |
562 | void DataImage::NoteReusedStructure(const void *data) |
563 | { |
564 | STANDARD_VM_CONTRACT; |
565 | |
566 | _ASSERTE(IsStored(data)); |
567 | |
568 | const StructureEntry * pEntry = m_structures.LookupPtr(data); |
569 | |
570 | if (!m_reusedStructures.Contains(pEntry->pNode)) |
571 | { |
572 | m_reusedStructures.Add(pEntry->pNode); |
573 | } |
574 | } |
575 | |
576 | // Save the info of an RVA into m_rvaInfoVector. |
577 | void DataImage::StoreRvaInfo(FieldDesc * pFD, |
578 | DWORD rva, |
579 | UINT size, |
580 | UINT align) |
581 | { |
582 | RvaInfoStructure rvaInfo; |
583 | |
584 | _ASSERTE(m_module == pFD->GetModule()); |
585 | _ASSERTE(m_module == pFD->GetLoaderModule()); |
586 | |
587 | rvaInfo.pFD = pFD; |
588 | rvaInfo.rva = rva; |
589 | rvaInfo.size = size; |
590 | rvaInfo.align = align; |
591 | |
592 | m_rvaInfoVector.Append(rvaInfo); |
593 | } |
594 | |
595 | // qsort compare function. |
596 | // Primary key: rva (ascending order). Secondary key: size (descending order). |
597 | int __cdecl DataImage::rvaInfoVectorEntryCmp(const void* a_, const void* b_) |
598 | { |
599 | LIMITED_METHOD_CONTRACT; |
600 | STATIC_CONTRACT_SO_TOLERANT; |
601 | DataImage::RvaInfoStructure *a = (DataImage::RvaInfoStructure *)a_; |
602 | DataImage::RvaInfoStructure *b = (DataImage::RvaInfoStructure *)b_; |
603 | int rvaComparisonResult = (int)(a->rva - b->rva); |
604 | if (rvaComparisonResult!=0) |
605 | return rvaComparisonResult; // Ascending order on rva |
606 | return (int)(b->size - a->size); // Descending order on size |
607 | } |
608 | |
609 | // Sort the list of RVA statics in an ascending order wrt the RVA and save them. |
610 | // For RVA structures with the same RVA, we will only store the one with the largest size. |
611 | void DataImage::SaveRvaStructure() |
612 | { |
613 | if (m_rvaInfoVector.IsEmpty()) |
614 | return; // No RVA static to save |
615 | |
616 | // Use qsort to sort the m_rvaInfoVector |
617 | qsort (&m_rvaInfoVector[0], // start of array |
618 | m_rvaInfoVector.GetCount(), // array size in elements |
619 | sizeof(RvaInfoStructure), // element size in bytes |
620 | rvaInfoVectorEntryCmp); // comparere function |
621 | |
622 | RvaInfoStructure * previousRvaInfo = NULL; |
623 | |
624 | for (COUNT_T i=0; i<m_rvaInfoVector.GetCount(); i++) { |
625 | |
626 | RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]); |
627 | |
628 | // Verify that rvaInfo->rva are actually monotonically increasing and |
629 | // rvaInfo->size are monotonically decreasing if rva are the same. |
630 | _ASSERTE(previousRvaInfo==NULL || |
631 | previousRvaInfo->rva < rvaInfo->rva || |
632 | previousRvaInfo->rva == rvaInfo->rva && previousRvaInfo->size >= rvaInfo->size |
633 | ); |
634 | |
635 | if (previousRvaInfo==NULL || previousRvaInfo->rva != rvaInfo->rva) { |
636 | void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL); |
637 | |
638 | // Note that we force the structures to be laid out in the order we save them |
639 | StoreStructureInOrder(pRVAData, rvaInfo->size, |
640 | DataImage::ITEM_RVA_STATICS, |
641 | rvaInfo->align); |
642 | } |
643 | |
644 | previousRvaInfo = rvaInfo; |
645 | } |
646 | } |
647 | |
648 | void DataImage::RegisterSurrogate(PVOID ptr, PVOID surrogate) |
649 | { |
650 | STANDARD_VM_CONTRACT; |
651 | |
652 | m_surrogates.Add(ptr, surrogate); |
653 | } |
654 | |
655 | PVOID DataImage::LookupSurrogate(PVOID ptr) |
656 | { |
657 | STANDARD_VM_CONTRACT; |
658 | |
659 | const KeyValuePair<PVOID, PVOID> * pEntry = m_surrogates.LookupPtr(ptr); |
660 | if (pEntry == NULL) |
661 | return NULL; |
662 | return pEntry->Value(); |
663 | } |
664 | |
665 | // Please read comments in corcompile.h for ZapVirtualSectionType before |
666 | // putting data items into sections. |
667 | FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type) |
668 | { |
669 | LIMITED_METHOD_CONTRACT; |
670 | |
671 | switch ((int)type) |
672 | { |
673 | // SECTION_MODULE |
674 | case NodeTypeForItemKind(DataImage::ITEM_MODULE): |
675 | return CORCOMPILE_SECTION_MODULE; |
676 | |
677 | // CORCOMPILE_SECTION_WRITE (Hot Writeable) |
678 | // things only go in here if they are: |
679 | // (a) explicitly identified by profiling data |
680 | // or (b) if we have no profiling for these items but they are frequently written to |
681 | case NodeTypeForItemKind(DataImage::ITEM_FILEREF_MAP): |
682 | case NodeTypeForItemKind(DataImage::ITEM_ASSEMREF_MAP): |
683 | case NodeTypeForItemKind(DataImage::ITEM_DYNAMIC_STATICS_INFO_TABLE): |
684 | case NodeTypeForItemKind(DataImage::ITEM_DYNAMIC_STATICS_INFO_ENTRY): |
685 | case NodeTypeForItemKind(DataImage::ITEM_CER_RESTORE_FLAGS): |
686 | return CORCOMPILE_SECTION_WRITE; |
687 | |
688 | // CORCOMPILE_SECTION_WRITEABLE (Cold Writeable) |
689 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_SPECIAL_WRITEABLE): |
690 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_DATA_COLD_WRITEABLE): |
691 | case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY_WRITEABLE): |
692 | case NodeTypeForItemKind(DataImage::ITEM_FROZEN_OBJECTS): // sometimes the objhdr is modified |
693 | return CORCOMPILE_SECTION_WRITEABLE; |
694 | |
695 | // SECTION_HOT |
696 | // Other things go in here if |
697 | // (a) identified as reads by the profiling runs |
698 | // (b) if we have no profiling for these items but are identified as typically being read |
699 | case NodeTypeForItemKind(DataImage::ITEM_CER_ROOT_TABLE): |
700 | case NodeTypeForItemKind(DataImage::ITEM_RID_MAP_HOT): |
701 | case NodeTypeForItemKind(DataImage::ITEM_BINDER): |
702 | case NodeTypeForItemKind(DataImage::ITEM_MODULE_SECDESC): |
703 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_HOT): |
704 | return CORCOMPILE_SECTION_HOT; |
705 | |
706 | case NodeTypeForItemKind(DataImage::ITEM_BINDER_ITEMS): // these are the guaranteed to be hot items |
707 | return CORCOMPILE_SECTION_READONLY_SHARED_HOT; |
708 | |
709 | // SECTION_READONLY_HOT |
710 | case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_HOT): // this is assumed to be hot. it is not written to. |
711 | case NodeTypeForItemKind(DataImage::ITEM_MODULE_CCTOR_INFO_HOT): |
712 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT): |
713 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT): |
714 | return CORCOMPILE_SECTION_READONLY_HOT; |
715 | |
716 | // SECTION_HOT_WRITEABLE |
717 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_HOT_WRITEABLE): |
718 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE_DATA_HOT_WRITEABLE): |
719 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_HOT): |
720 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_HOT): |
721 | return CORCOMPILE_SECTION_HOT_WRITEABLE; |
722 | |
723 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_HOT_WRITEABLE): |
724 | return CORCOMPILE_SECTION_METHOD_PRECODE_WRITE; |
725 | |
726 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_HOT): |
727 | return CORCOMPILE_SECTION_METHOD_PRECODE_HOT; |
728 | |
729 | // SECTION_RVA_STATICS |
730 | case NodeTypeForItemKind(DataImage::ITEM_RVA_STATICS): |
731 | return CORCOMPILE_SECTION_RVA_STATICS_COLD; // This MUST go in this section |
732 | |
733 | // SECTION_WARM |
734 | case NodeTypeForItemKind(DataImage::ITEM_GUID_INFO): |
735 | case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY_LAYOUT): |
736 | case NodeTypeForItemKind(DataImage::ITEM_EECLASS_WARM): |
737 | return CORCOMPILE_SECTION_WARM; |
738 | |
739 | // SECTION_READONLY_WARM |
740 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE): |
741 | case NodeTypeForItemKind(DataImage::ITEM_INTERFACE_MAP): |
742 | case NodeTypeForItemKind(DataImage::ITEM_DISPATCH_MAP): |
743 | case NodeTypeForItemKind(DataImage::ITEM_GENERICS_STATIC_FIELDDESCS): |
744 | case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_COLD): |
745 | case NodeTypeForItemKind(DataImage::ITEM_MODULE_CCTOR_INFO_COLD): |
746 | case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_NAME): |
747 | case NodeTypeForItemKind(DataImage::ITEM_PROPERTY_NAME_SET): |
748 | case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY_WARM): |
749 | return CORCOMPILE_SECTION_READONLY_WARM; |
750 | |
751 | case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY): |
752 | return CORCOMPILE_SECTION_READONLY_DICTIONARY; |
753 | |
754 | case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK): |
755 | return CORCOMPILE_SECTION_READONLY_VCHUNKS; |
756 | |
757 | // SECTION_CLASS_COLD |
758 | case NodeTypeForItemKind(DataImage::ITEM_PARAM_TYPEDESC): |
759 | case NodeTypeForItemKind(DataImage::ITEM_ARRAY_TYPEDESC): |
760 | case NodeTypeForItemKind(DataImage::ITEM_EECLASS): |
761 | case NodeTypeForItemKind(DataImage::ITEM_FIELD_MARSHALERS): |
762 | case NodeTypeForItemKind(DataImage::ITEM_FPTR_TYPEDESC): |
763 | #ifdef FEATURE_COMINTEROP |
764 | case NodeTypeForItemKind(DataImage::ITEM_SPARSE_VTABLE_MAP_TABLE): |
765 | #endif // FEATURE_COMINTEROP |
766 | return CORCOMPILE_SECTION_CLASS_COLD; |
767 | |
768 | //SECTION_READONLY_COLD |
769 | case NodeTypeForItemKind(DataImage::ITEM_FIELD_DESC_LIST): |
770 | case NodeTypeForItemKind(DataImage::ITEM_ENUM_VALUES): |
771 | case NodeTypeForItemKind(DataImage::ITEM_ENUM_NAME_POINTERS): |
772 | case NodeTypeForItemKind(DataImage::ITEM_ENUM_NAME): |
773 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD): |
774 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD): |
775 | case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY): |
776 | #ifdef FEATURE_COMINTEROP |
777 | case NodeTypeForItemKind(DataImage::ITEM_SPARSE_VTABLE_MAP_ENTRIES): |
778 | #endif // FEATURE_COMINTEROP |
779 | case NodeTypeForItemKind(DataImage::ITEM_CLASS_VARIANCE_INFO): |
780 | return CORCOMPILE_SECTION_READONLY_COLD; |
781 | |
782 | // SECTION_CROSS_DOMAIN_INFO |
783 | case NodeTypeForItemKind(DataImage::ITEM_CROSS_DOMAIN_INFO): |
784 | case NodeTypeForItemKind(DataImage::ITEM_VTS_INFO): |
785 | return CORCOMPILE_SECTION_CROSS_DOMAIN_INFO; |
786 | |
787 | // SECTION_METHOD_DESC_COLD |
788 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_COLD): |
789 | return CORCOMPILE_SECTION_METHOD_DESC_COLD; |
790 | |
791 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_DESC_COLD_WRITEABLE): |
792 | case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG): |
793 | return CORCOMPILE_SECTION_METHOD_DESC_COLD_WRITEABLE; |
794 | |
795 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_COLD): |
796 | return CORCOMPILE_SECTION_METHOD_PRECODE_COLD; |
797 | |
798 | case NodeTypeForItemKind(DataImage::ITEM_METHOD_PRECODE_COLD_WRITEABLE): |
799 | return CORCOMPILE_SECTION_METHOD_PRECODE_COLD_WRITEABLE; |
800 | |
801 | // SECTION_MODULE_COLD |
802 | case NodeTypeForItemKind(DataImage::ITEM_TYPEDEF_MAP): |
803 | case NodeTypeForItemKind(DataImage::ITEM_TYPEREF_MAP): |
804 | case NodeTypeForItemKind(DataImage::ITEM_METHODDEF_MAP): |
805 | case NodeTypeForItemKind(DataImage::ITEM_FIELDDEF_MAP): |
806 | case NodeTypeForItemKind(DataImage::ITEM_MEMBERREF_MAP): |
807 | case NodeTypeForItemKind(DataImage::ITEM_GENERICPARAM_MAP): |
808 | case NodeTypeForItemKind(DataImage::ITEM_GENERICTYPEDEF_MAP): |
809 | case NodeTypeForItemKind(DataImage::ITEM_PROPERTYINFO_MAP): |
810 | case NodeTypeForItemKind(DataImage::ITEM_TYVAR_TYPEDESC): |
811 | case NodeTypeForItemKind(DataImage::ITEM_EECLASS_COLD): |
812 | case NodeTypeForItemKind(DataImage::ITEM_CER_METHOD_LIST): |
813 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_COLD): |
814 | case NodeTypeForItemKind(DataImage::ITEM_NGEN_HASH_ENTRIES_COLD): |
815 | return CORCOMPILE_SECTION_MODULE_COLD; |
816 | |
817 | // SECTION_DEBUG_COLD |
818 | case NodeTypeForItemKind(DataImage::ITEM_DEBUG): |
819 | case NodeTypeForItemKind(DataImage::ITEM_INLINING_DATA): |
820 | return CORCOMPILE_SECTION_DEBUG_COLD; |
821 | |
822 | // SECTION_COMPRESSED_MAPS |
823 | case NodeTypeForItemKind(DataImage::ITEM_COMPRESSED_MAP): |
824 | return CORCOMPILE_SECTION_COMPRESSED_MAPS; |
825 | |
826 | default: |
827 | _ASSERTE(!"Missing mapping between type and section" ); |
828 | return CORCOMPILE_SECTION_MODULE_COLD; |
829 | } |
830 | } |
831 | |
832 | static int __cdecl LayoutOrderCmp(const void* a_, const void* b_) |
833 | { |
834 | DWORD a = ((DataImage::SavedNodeEntry*)a_)->dwAssociatedOrder; |
835 | DWORD b = ((DataImage::SavedNodeEntry*)b_)->dwAssociatedOrder; |
836 | |
837 | if (a > b) |
838 | { |
839 | return 1; |
840 | } |
841 | else |
842 | { |
843 | return (a < b) ? -1 : 0; |
844 | } |
845 | } |
846 | |
847 | void DataImage::PlaceRemainingStructures() |
848 | { |
849 | if (m_pZapImage->HasClassLayoutOrder()) |
850 | { |
851 | // The structures are currently in save order; since we are going to change |
852 | // that to class layout order, first place any that require us to maintain save order. |
853 | // Note that this is necessary because qsort is not stable. |
854 | for (COUNT_T iStructure = 0; iStructure < m_structuresInOrder.GetCount(); iStructure++) |
855 | { |
856 | if (m_structuresInOrder[iStructure].dwAssociatedOrder == MAINTAIN_SAVE_ORDER) |
857 | { |
858 | ZapNode * pStructure = m_structuresInOrder[iStructure].pNode; |
859 | if (!pStructure->IsPlaced()) |
860 | { |
861 | ZapVirtualSection * pSection = m_pZapImage->GetSection(GetSectionForNodeType(pStructure->GetType())); |
862 | pSection->Place(pStructure); |
863 | } |
864 | } |
865 | } |
866 | |
867 | qsort(&m_structuresInOrder[0], m_structuresInOrder.GetCount(), sizeof(SavedNodeEntry), LayoutOrderCmp); |
868 | } |
869 | |
870 | // Place the unplaced structures, which may have been re-sorted according to class-layout order |
871 | for (COUNT_T iStructure = 0; iStructure < m_structuresInOrder.GetCount(); iStructure++) |
872 | { |
873 | ZapNode * pStructure = m_structuresInOrder[iStructure].pNode; |
874 | if (!pStructure->IsPlaced()) |
875 | { |
876 | ZapVirtualSection * pSection = m_pZapImage->GetSection(GetSectionForNodeType(pStructure->GetType())); |
877 | pSection->Place(pStructure); |
878 | } |
879 | } |
880 | } |
881 | |
882 | int __cdecl DataImage::fixupEntryCmp(const void* a_, const void* b_) |
883 | { |
884 | LIMITED_METHOD_CONTRACT; |
885 | FixupEntry *a = (FixupEntry *)a_; |
886 | FixupEntry *b = (FixupEntry *)b_; |
887 | return (a->m_pLocation->GetRVA() + a->m_offset) - (b->m_pLocation->GetRVA() + b->m_offset); |
888 | } |
889 | |
890 | void DataImage::FixupRVAs() |
891 | { |
892 | STANDARD_VM_CONTRACT; |
893 | |
894 | FixupModuleRVAs(); |
895 | FixupRvaStructure(); |
896 | |
897 | |
898 | // Dev11 bug 181494 instrumentation |
899 | if (m_Fixups.GetCount() != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
900 | |
901 | qsort(&m_Fixups[0], m_Fixups.GetCount(), sizeof(FixupEntry), fixupEntryCmp); |
902 | |
903 | // Sentinel |
904 | FixupEntry entry; |
905 | |
906 | entry.m_type = 0; |
907 | entry.m_offset = 0; |
908 | entry.m_pLocation = NULL; |
909 | entry.m_pTargetNode = NULL; |
910 | |
911 | m_Fixups.Append(entry); |
912 | |
913 | // Dev11 bug 181494 instrumentation |
914 | if (m_Fixups.GetCount() -1 != m_iCurrentFixup) EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
915 | |
916 | m_iCurrentFixup = 0; |
917 | } |
918 | |
919 | void DataImage::SetRVAsForFields(IMetaDataEmit * pEmit) |
920 | { |
921 | for (COUNT_T i=0; i<m_rvaInfoVector.GetCount(); i++) { |
922 | |
923 | RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]); |
924 | |
925 | void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL); |
926 | |
927 | DWORD dwOffset = GetRVA(pRVAData); |
928 | |
929 | pEmit->SetRVA(rvaInfo->pFD->GetMemberDef(), dwOffset); |
930 | } |
931 | } |
932 | |
933 | void ZapStoredStructure::Save(ZapWriter * pWriter) |
934 | { |
935 | DataImage * image = ZapImage::GetImage(pWriter)->m_pDataImage; |
936 | |
937 | DataImage::FixupEntry * pPrevFixupEntry = NULL; |
938 | |
939 | for (;;) |
940 | { |
941 | DataImage::FixupEntry * pFixupEntry = &(image->m_Fixups[image->m_iCurrentFixup]); |
942 | |
943 | if (pFixupEntry->m_pLocation != this) |
944 | { |
945 | _ASSERTE(pFixupEntry->m_pLocation == NULL || |
946 | GetRVA() + GetSize() <= pFixupEntry->m_pLocation->GetRVA()); |
947 | break; |
948 | } |
949 | |
950 | PVOID pLocation = (BYTE *)GetData() + pFixupEntry->m_offset; |
951 | |
952 | if (pPrevFixupEntry == NULL || pPrevFixupEntry->m_offset != pFixupEntry->m_offset) |
953 | { |
954 | SSIZE_T targetOffset = DecodeTargetOffset(pLocation, pFixupEntry->m_type); |
955 | |
956 | #ifdef _DEBUG |
957 | // All pointers in EE datastructures should be aligned. This is important to |
958 | // avoid stradling relocations that cause issues with ASLR. |
959 | if (pFixupEntry->m_type == IMAGE_REL_BASED_PTR) |
960 | { |
961 | _ASSERTE(IS_ALIGNED(pWriter->GetCurrentRVA() + pFixupEntry->m_offset, sizeof(TADDR))); |
962 | } |
963 | #endif |
964 | |
965 | ZapImage::GetImage(pWriter)->WriteReloc( |
966 | GetData(), |
967 | pFixupEntry->m_offset, |
968 | pFixupEntry->m_pTargetNode, |
969 | (int)targetOffset, |
970 | pFixupEntry->m_type); |
971 | } |
972 | else |
973 | { |
974 | // It's fine to have duplicate fixup entries, but they must target the same data. |
975 | // If this assert fires, Fixup* was called twice on the same field in an NGen'd |
976 | // structure with different targets, which likely indicates the current structure |
977 | // was illegally interned or shared. |
978 | _ASSERTE(pPrevFixupEntry->m_type == pFixupEntry->m_type); |
979 | _ASSERTE(pPrevFixupEntry->m_pTargetNode== pFixupEntry->m_pTargetNode); |
980 | } |
981 | |
982 | pPrevFixupEntry = pFixupEntry; |
983 | image->m_iCurrentFixup++; |
984 | } |
985 | |
986 | pWriter->Write(GetData(), m_dwSize); |
987 | } |
988 | |
989 | void DataImage::FixupSectionRange(SIZE_T offset, ZapNode * pNode) |
990 | { |
991 | STANDARD_VM_CONTRACT; |
992 | |
993 | if (pNode->GetSize() != 0) |
994 | { |
995 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode); |
996 | |
997 | SIZE_T * pSize = (SIZE_T *)((BYTE *)GetImagePointer(m_module->m_pNGenLayoutInfo) + offset + sizeof(TADDR)); |
998 | *pSize = pNode->GetSize(); |
999 | } |
1000 | } |
1001 | |
1002 | void DataImage::FixupSectionPtr(SIZE_T offset, ZapNode * pNode) |
1003 | { |
1004 | if (pNode->GetSize() != 0) |
1005 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode); |
1006 | } |
1007 | |
1008 | void DataImage::FixupJumpStubPtr(SIZE_T offset, CorInfoHelpFunc ftnNum) |
1009 | { |
1010 | ZapNode * pNode = m_pZapImage->GetHelperThunkIfExists(ftnNum); |
1011 | if (pNode != NULL) |
1012 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offset, pNode); |
1013 | } |
1014 | |
1015 | void DataImage::FixupModuleRVAs() |
1016 | { |
1017 | STANDARD_VM_CONTRACT; |
1018 | |
1019 | FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[0]), m_pZapImage->m_pHotCodeSection); |
1020 | FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[1]), m_pZapImage->m_pCodeSection); |
1021 | FixupSectionRange(offsetof(NGenLayoutInfo, m_CodeSections[2]), m_pZapImage->m_pColdCodeSection); |
1022 | |
1023 | NGenLayoutInfo * pSavedNGenLayoutInfo = (NGenLayoutInfo *)GetImagePointer(m_module->m_pNGenLayoutInfo); |
1024 | |
1025 | COUNT_T nHotRuntimeFunctions = m_pZapImage->m_pHotRuntimeFunctionSection->GetNodeCount(); |
1026 | if (nHotRuntimeFunctions != 0) |
1027 | { |
1028 | pSavedNGenLayoutInfo->m_nRuntimeFunctions[0] = nHotRuntimeFunctions; |
1029 | |
1030 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_UnwindInfoLookupTable[0]), m_pZapImage->m_pHotRuntimeFunctionLookupSection); |
1031 | pSavedNGenLayoutInfo->m_UnwindInfoLookupTableEntryCount[0] = m_pZapImage->m_pHotRuntimeFunctionLookupSection->GetSize() / sizeof(DWORD) - 1; |
1032 | |
1033 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_MethodDescs[0]), m_pZapImage->m_pHotCodeMethodDescsSection); |
1034 | |
1035 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[0]), m_pZapImage->m_pHotRuntimeFunctionSection); |
1036 | } |
1037 | |
1038 | COUNT_T nRuntimeFunctions = m_pZapImage->m_pRuntimeFunctionSection->GetNodeCount(); |
1039 | if (nRuntimeFunctions != 0) |
1040 | { |
1041 | pSavedNGenLayoutInfo->m_nRuntimeFunctions[1] = nRuntimeFunctions; |
1042 | |
1043 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_UnwindInfoLookupTable[1]), m_pZapImage->m_pRuntimeFunctionLookupSection); |
1044 | pSavedNGenLayoutInfo->m_UnwindInfoLookupTableEntryCount[1] = m_pZapImage->m_pRuntimeFunctionLookupSection->GetSize() / sizeof(DWORD) - 1; |
1045 | |
1046 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_MethodDescs[1]), m_pZapImage->m_pCodeMethodDescsSection); |
1047 | |
1048 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[1]), m_pZapImage->m_pRuntimeFunctionSection); |
1049 | } |
1050 | |
1051 | COUNT_T nColdRuntimeFunctions = m_pZapImage->m_pColdRuntimeFunctionSection->GetNodeCount(); |
1052 | if (nColdRuntimeFunctions != 0) |
1053 | { |
1054 | pSavedNGenLayoutInfo->m_nRuntimeFunctions[2] = nColdRuntimeFunctions; |
1055 | |
1056 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_pRuntimeFunctions[2]), m_pZapImage->m_pColdRuntimeFunctionSection); |
1057 | } |
1058 | |
1059 | if (m_pZapImage->m_pColdCodeMapSection->GetNodeCount() != 0) |
1060 | { |
1061 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_ColdCodeMap), m_pZapImage->m_pColdCodeMapSection); |
1062 | } |
1063 | |
1064 | FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[0]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_HOT)); |
1065 | FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[1]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_COLD)); |
1066 | FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[2]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_WRITE)); |
1067 | FixupSectionRange(offsetof(NGenLayoutInfo, m_Precodes[3]), m_pZapImage->GetSection(CORCOMPILE_SECTION_METHOD_PRECODE_COLD_WRITEABLE)); |
1068 | |
1069 | FixupSectionRange(offsetof(NGenLayoutInfo, m_JumpStubs), m_pZapImage->m_pHelperTableSection); |
1070 | FixupSectionRange(offsetof(NGenLayoutInfo, m_StubLinkStubs), m_pZapImage->m_pStubsSection); |
1071 | FixupSectionRange(offsetof(NGenLayoutInfo, m_VirtualMethodThunks), m_pZapImage->m_pVirtualImportThunkSection); |
1072 | FixupSectionRange(offsetof(NGenLayoutInfo, m_ExternalMethodThunks), m_pZapImage->m_pExternalMethodThunkSection); |
1073 | |
1074 | if (m_pZapImage->m_pExceptionInfoLookupTable->GetSize() != 0) |
1075 | FixupSectionRange(offsetof(NGenLayoutInfo, m_ExceptionInfoLookupTable), m_pZapImage->m_pExceptionInfoLookupTable); |
1076 | |
1077 | FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pPrestubJumpStub), CORINFO_HELP_EE_PRESTUB); |
1078 | #ifdef HAS_FIXUP_PRECODE |
1079 | FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pPrecodeFixupJumpStub), CORINFO_HELP_EE_PRECODE_FIXUP); |
1080 | #endif |
1081 | FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pVirtualImportFixupJumpStub), CORINFO_HELP_EE_VTABLE_FIXUP); |
1082 | FixupJumpStubPtr(offsetof(NGenLayoutInfo, m_pExternalMethodFixupJumpStub), CORINFO_HELP_EE_EXTERNAL_FIXUP); |
1083 | |
1084 | ZapNode * pFilterPersonalityRoutine = m_pZapImage->GetHelperThunkIfExists(CORINFO_HELP_EE_PERSONALITY_ROUTINE_FILTER_FUNCLET); |
1085 | if (pFilterPersonalityRoutine != NULL) |
1086 | FixupFieldToNode(m_module->m_pNGenLayoutInfo, offsetof(NGenLayoutInfo, m_rvaFilterPersonalityRoutine), pFilterPersonalityRoutine, 0, IMAGE_REL_BASED_ABSOLUTE); |
1087 | } |
1088 | |
1089 | void DataImage::FixupRvaStructure() |
1090 | { |
1091 | STANDARD_VM_CONTRACT; |
1092 | |
1093 | for (COUNT_T i=0; i<m_rvaInfoVector.GetCount(); i++) { |
1094 | |
1095 | RvaInfoStructure * rvaInfo = &(m_rvaInfoVector[i]); |
1096 | |
1097 | void * pRVAData = rvaInfo->pFD->GetStaticAddressHandle(NULL); |
1098 | |
1099 | DWORD dwOffset = GetRVA(pRVAData); |
1100 | |
1101 | FieldDesc * pNewFD = (FieldDesc *)GetImagePointer(rvaInfo->pFD); |
1102 | pNewFD->SetOffset(dwOffset); |
1103 | } |
1104 | } |
1105 | |
1106 | ZapNode * DataImage::GetCodeAddress(MethodDesc * method) |
1107 | { |
1108 | ZapMethodHeader * pMethod = m_pZapImage->GetCompiledMethod((CORINFO_METHOD_HANDLE)method); |
1109 | return (pMethod != NULL) ? pMethod->GetCode() : NULL; |
1110 | } |
1111 | |
1112 | BOOL DataImage::CanDirectCall(MethodDesc * method, CORINFO_ACCESS_FLAGS accessFlags) |
1113 | { |
1114 | return m_pZapImage->canIntraModuleDirectCall(NULL, (CORINFO_METHOD_HANDLE)method, NULL, accessFlags); |
1115 | } |
1116 | |
1117 | ZapNode * DataImage::GetFixupList(MethodDesc * method) |
1118 | { |
1119 | ZapMethodHeader * pMethod = m_pZapImage->GetCompiledMethod((CORINFO_METHOD_HANDLE)method); |
1120 | return (pMethod != NULL) ? pMethod->GetFixupList() : NULL; |
1121 | } |
1122 | |
1123 | ZapNode * DataImage::GetHelperThunk(CorInfoHelpFunc ftnNum) |
1124 | { |
1125 | return m_pZapImage->GetHelperThunk(ftnNum); |
1126 | } |
1127 | |
1128 | ZapNode * DataImage::GetTypeHandleImport(TypeHandle th, PVOID pUniqueId) |
1129 | { |
1130 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetClassHandleImport(CORINFO_CLASS_HANDLE(th.AsPtr()), pUniqueId); |
1131 | if (!pImport->IsPlaced()) |
1132 | m_pZapImage->GetImportTable()->PlaceImport(pImport); |
1133 | return pImport; |
1134 | } |
1135 | |
1136 | ZapNode * DataImage::GetMethodHandleImport(MethodDesc * pMD) |
1137 | { |
1138 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetMethodHandleImport(CORINFO_METHOD_HANDLE(pMD)); |
1139 | if (!pImport->IsPlaced()) |
1140 | m_pZapImage->GetImportTable()->PlaceImport(pImport); |
1141 | return pImport; |
1142 | } |
1143 | |
1144 | ZapNode * DataImage::GetFieldHandleImport(FieldDesc * pMD) |
1145 | { |
1146 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetFieldHandleImport(CORINFO_FIELD_HANDLE(pMD)); |
1147 | if (!pImport->IsPlaced()) |
1148 | m_pZapImage->GetImportTable()->PlaceImport(pImport); |
1149 | return pImport; |
1150 | } |
1151 | |
1152 | ZapNode * DataImage::GetModuleHandleImport(Module * pModule) |
1153 | { |
1154 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetModuleHandleImport(CORINFO_MODULE_HANDLE(pModule)); |
1155 | if (!pImport->IsPlaced()) |
1156 | m_pZapImage->GetImportTable()->PlaceImport(pImport); |
1157 | return pImport; |
1158 | } |
1159 | |
1160 | DWORD DataImage::GetModuleImportIndex(Module * pModule) |
1161 | { |
1162 | return m_pZapImage->GetImportTable()->GetIndexOfModule((CORINFO_MODULE_HANDLE)pModule); |
1163 | } |
1164 | |
1165 | ZapNode * DataImage::GetExistingTypeHandleImport(TypeHandle th) |
1166 | { |
1167 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingClassHandleImport(CORINFO_CLASS_HANDLE(th.AsPtr())); |
1168 | return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL; |
1169 | } |
1170 | |
1171 | ZapNode * DataImage::GetExistingMethodHandleImport(MethodDesc * pMD) |
1172 | { |
1173 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingMethodHandleImport(CORINFO_METHOD_HANDLE(pMD)); |
1174 | return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL; |
1175 | } |
1176 | |
1177 | ZapNode * DataImage::GetExistingFieldHandleImport(FieldDesc * pFD) |
1178 | { |
1179 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetExistingFieldHandleImport(CORINFO_FIELD_HANDLE(pFD)); |
1180 | return (pImport != NULL && pImport->IsPlaced()) ? pImport : NULL; |
1181 | } |
1182 | |
1183 | ZapNode * DataImage::GetVirtualImportThunk(MethodTable * pMT, MethodDesc * pMD, int slotNumber) |
1184 | { |
1185 | _ASSERTE(pMD == pMT->GetMethodDescForSlot(slotNumber)); |
1186 | _ASSERTE(!pMD->IsGenericMethodDefinition()); |
1187 | |
1188 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetVirtualImportThunk(CORINFO_METHOD_HANDLE(pMD), slotNumber); |
1189 | if (!pImport->IsPlaced()) |
1190 | m_pZapImage->GetImportTable()->PlaceVirtualImportThunk(pImport); |
1191 | return pImport; |
1192 | } |
1193 | |
1194 | ZapNode * DataImage::GetGenericSignature(PVOID signature, BOOL fMethod) |
1195 | { |
1196 | ZapGenericSignature * pGenericSignature = m_pZapImage->GetImportTable()->GetGenericSignature(signature, fMethod); |
1197 | if (!pGenericSignature->IsPlaced()) |
1198 | m_pZapImage->GetImportTable()->PlaceBlob(pGenericSignature); |
1199 | return pGenericSignature; |
1200 | } |
1201 | |
1202 | #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) |
1203 | |
1204 | class ZapStubPrecode : public ZapNode |
1205 | { |
1206 | protected: |
1207 | MethodDesc * m_pMD; |
1208 | DataImage::ItemKind m_kind; |
1209 | |
1210 | public: |
1211 | ZapStubPrecode(MethodDesc * pMethod, DataImage::ItemKind kind) |
1212 | : m_pMD(pMethod), m_kind(kind) |
1213 | { |
1214 | } |
1215 | |
1216 | virtual DWORD GetSize() |
1217 | { |
1218 | return sizeof(StubPrecode); |
1219 | } |
1220 | |
1221 | virtual UINT GetAlignment() |
1222 | { |
1223 | return PRECODE_ALIGNMENT; |
1224 | } |
1225 | |
1226 | virtual ZapNodeType GetType() |
1227 | { |
1228 | return NodeTypeForItemKind(m_kind); |
1229 | } |
1230 | |
1231 | virtual DWORD ComputeRVA(ZapWriter * pZapWriter, DWORD dwPos) |
1232 | { |
1233 | dwPos = AlignUp(dwPos, GetAlignment()); |
1234 | |
1235 | // Alignment for straddlers. Need a cast to help gcc choose between AlignmentTrim(UINT,UINT) and (UINT64,UINT). |
1236 | if (AlignmentTrim(static_cast<UINT>(dwPos + offsetof(StubPrecode, m_pMethodDesc)), RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - sizeof(TADDR)) |
1237 | dwPos += GetAlignment(); |
1238 | |
1239 | SetRVA(dwPos); |
1240 | |
1241 | dwPos += GetSize(); |
1242 | |
1243 | return dwPos; |
1244 | } |
1245 | |
1246 | virtual void Save(ZapWriter * pZapWriter) |
1247 | { |
1248 | ZapImage * pImage = ZapImage::GetImage(pZapWriter); |
1249 | |
1250 | StubPrecode precode; |
1251 | |
1252 | precode.Init(m_pMD); |
1253 | |
1254 | SSIZE_T offset; |
1255 | ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset); |
1256 | pImage->WriteReloc(&precode, offsetof(StubPrecode, m_pMethodDesc), |
1257 | pNode, (int)offset, IMAGE_REL_BASED_PTR); |
1258 | |
1259 | pImage->WriteReloc(&precode, offsetof(StubPrecode, m_rel32), |
1260 | pImage->GetHelperThunk(CORINFO_HELP_EE_PRESTUB), 0, IMAGE_REL_BASED_REL32); |
1261 | |
1262 | pZapWriter->Write(&precode, sizeof(precode)); |
1263 | } |
1264 | }; |
1265 | |
1266 | #ifdef HAS_NDIRECT_IMPORT_PRECODE |
1267 | class ZapNDirectImportPrecode : public ZapStubPrecode |
1268 | { |
1269 | public: |
1270 | ZapNDirectImportPrecode(MethodDesc * pMD, DataImage::ItemKind kind) |
1271 | : ZapStubPrecode(pMD, kind) |
1272 | { |
1273 | } |
1274 | |
1275 | virtual void Save(ZapWriter * pZapWriter) |
1276 | { |
1277 | ZapImage * pImage = ZapImage::GetImage(pZapWriter); |
1278 | |
1279 | StubPrecode precode; |
1280 | |
1281 | precode.Init(m_pMD); |
1282 | |
1283 | SSIZE_T offset; |
1284 | ZapNode * pNode = pImage->m_pDataImage->GetNodeForStructure(m_pMD, &offset); |
1285 | pImage->WriteReloc(&precode, offsetof(StubPrecode, m_pMethodDesc), |
1286 | pNode, (int)offset, IMAGE_REL_BASED_PTR); |
1287 | |
1288 | pImage->WriteReloc(&precode, offsetof(StubPrecode, m_rel32), |
1289 | pImage->GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP), 0, IMAGE_REL_BASED_REL32); |
1290 | |
1291 | pZapWriter->Write(&precode, sizeof(precode)); |
1292 | } |
1293 | }; |
1294 | #endif // HAS_NDIRECT_IMPORT_PRECODE |
1295 | |
1296 | void DataImage::SavePrecode(PVOID ptr, MethodDesc * pMD, PrecodeType t, ItemKind kind, BOOL fIsPrebound) |
1297 | { |
1298 | ZapNode * pNode = NULL; |
1299 | |
1300 | switch (t) { |
1301 | case PRECODE_STUB: |
1302 | pNode = new (GetHeap()) ZapStubPrecode(pMD, kind); |
1303 | GetHelperThunk(CORINFO_HELP_EE_PRESTUB); |
1304 | break; |
1305 | |
1306 | #ifdef HAS_NDIRECT_IMPORT_PRECODE |
1307 | case PRECODE_NDIRECT_IMPORT: |
1308 | pNode = new (GetHeap()) ZapNDirectImportPrecode(pMD, kind); |
1309 | GetHelperThunk(CORINFO_HELP_EE_PINVOKE_FIXUP); |
1310 | break; |
1311 | #endif // HAS_NDIRECT_IMPORT_PRECODE |
1312 | |
1313 | default: |
1314 | _ASSERTE(!"Unexpected precode type" ); |
1315 | break; |
1316 | } |
1317 | |
1318 | BindPointer(ptr, pNode, 0); |
1319 | |
1320 | AddStructureInOrder(pNode); |
1321 | } |
1322 | |
1323 | #endif // _TARGET_X86_ || _TARGET_AMD64_ |
1324 | |
1325 | void DataImage::FixupModulePointer(Module * pModule, PVOID p, SSIZE_T offset, ZapRelocationType type) |
1326 | { |
1327 | STANDARD_VM_CONTRACT; |
1328 | |
1329 | if (pModule != NULL) |
1330 | { |
1331 | if (CanEagerBindToModule(pModule) && CanHardBindToZapModule(pModule)) |
1332 | { |
1333 | FixupField(p, offset, pModule, 0, type); |
1334 | } |
1335 | else |
1336 | { |
1337 | ZapNode * pImport = GetModuleHandleImport(pModule); |
1338 | FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type); |
1339 | } |
1340 | } |
1341 | } |
1342 | |
1343 | void DataImage::FixupMethodTablePointer(MethodTable * pMT, PVOID p, SSIZE_T offset, ZapRelocationType type) |
1344 | { |
1345 | STANDARD_VM_CONTRACT; |
1346 | |
1347 | if (pMT != NULL) |
1348 | { |
1349 | if (CanEagerBindToMethodTable(pMT) && CanHardBindToZapModule(pMT->GetLoaderModule())) |
1350 | { |
1351 | FixupField(p, offset, pMT, 0, type); |
1352 | } |
1353 | else |
1354 | { |
1355 | ZapNode * pImport = GetTypeHandleImport(pMT); |
1356 | FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type); |
1357 | } |
1358 | } |
1359 | } |
1360 | |
1361 | void DataImage::FixupTypeHandlePointer(TypeHandle th, PVOID p, SSIZE_T offset, ZapRelocationType type) |
1362 | { |
1363 | STANDARD_VM_CONTRACT; |
1364 | |
1365 | if (!th.IsNull()) |
1366 | { |
1367 | if (th.IsTypeDesc()) |
1368 | { |
1369 | if (CanEagerBindToTypeHandle(th) && CanHardBindToZapModule(th.GetLoaderModule())) |
1370 | { |
1371 | FixupField(p, offset, th.AsTypeDesc(), 2, type); |
1372 | } |
1373 | else |
1374 | { |
1375 | ZapNode * pImport = GetTypeHandleImport(th); |
1376 | FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type); |
1377 | } |
1378 | } |
1379 | else |
1380 | { |
1381 | MethodTable * pMT = th.AsMethodTable(); |
1382 | FixupMethodTablePointer(pMT, p, offset, type); |
1383 | } |
1384 | } |
1385 | } |
1386 | |
1387 | void DataImage::FixupMethodDescPointer(MethodDesc * pMD, PVOID p, SSIZE_T offset, ZapRelocationType type /*=IMAGE_REL_BASED_PTR*/) |
1388 | { |
1389 | STANDARD_VM_CONTRACT; |
1390 | |
1391 | if (pMD != NULL) |
1392 | { |
1393 | if (CanEagerBindToMethodDesc(pMD) && CanHardBindToZapModule(pMD->GetLoaderModule())) |
1394 | { |
1395 | FixupField(p, offset, pMD, 0, type); |
1396 | } |
1397 | else |
1398 | { |
1399 | ZapNode * pImport = GetMethodHandleImport(pMD); |
1400 | FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type); |
1401 | } |
1402 | } |
1403 | } |
1404 | |
1405 | void DataImage::FixupFieldDescPointer(FieldDesc * pFD, PVOID p, SSIZE_T offset, ZapRelocationType type /*=IMAGE_REL_BASED_PTR*/) |
1406 | { |
1407 | STANDARD_VM_CONTRACT; |
1408 | |
1409 | if (pFD != NULL) |
1410 | { |
1411 | if (CanEagerBindToFieldDesc(pFD) && CanHardBindToZapModule(pFD->GetLoaderModule())) |
1412 | { |
1413 | FixupField(p, offset, pFD, 0, type); |
1414 | } |
1415 | else |
1416 | { |
1417 | ZapNode * pImport = GetFieldHandleImport(pFD); |
1418 | FixupFieldToNode(p, offset, pImport, FIXUP_POINTER_INDIRECTION, type); |
1419 | } |
1420 | } |
1421 | } |
1422 | |
1423 | void DataImage::FixupMethodTablePointer(PVOID p, FixupPointer<PTR_MethodTable> * ppMT) |
1424 | { |
1425 | FixupMethodTablePointer(ppMT->GetValue(), p, (BYTE *)ppMT - (BYTE *)p, IMAGE_REL_BASED_PTR); |
1426 | } |
1427 | void DataImage::FixupTypeHandlePointer(PVOID p, FixupPointer<TypeHandle> * pth) |
1428 | { |
1429 | FixupTypeHandlePointer(pth->GetValue(), p, (BYTE *)pth - (BYTE *)p, IMAGE_REL_BASED_PTR); |
1430 | } |
1431 | void DataImage::FixupMethodDescPointer(PVOID p, FixupPointer<PTR_MethodDesc> * ppMD) |
1432 | { |
1433 | FixupMethodDescPointer(ppMD->GetValue(), p, (BYTE *)ppMD - (BYTE *)p, IMAGE_REL_BASED_PTR); |
1434 | } |
1435 | void DataImage::FixupFieldDescPointer(PVOID p, FixupPointer<PTR_FieldDesc> * ppFD) |
1436 | { |
1437 | FixupFieldDescPointer(ppFD->GetValue(), p, (BYTE *)ppFD - (BYTE *)p, IMAGE_REL_BASED_PTR); |
1438 | } |
1439 | |
1440 | void DataImage::FixupModulePointer(PVOID p, RelativeFixupPointer<PTR_Module> * ppModule) |
1441 | { |
1442 | FixupModulePointer(ppModule->GetValueMaybeNull(), p, (BYTE *)ppModule - (BYTE *)p, IMAGE_REL_BASED_RELPTR); |
1443 | } |
1444 | void DataImage::FixupMethodTablePointer(PVOID p, RelativeFixupPointer<PTR_MethodTable> * ppMT) |
1445 | { |
1446 | FixupMethodTablePointer(ppMT->GetValueMaybeNull(), p, (BYTE *)ppMT - (BYTE *)p, IMAGE_REL_BASED_RELPTR); |
1447 | } |
1448 | void DataImage::FixupTypeHandlePointer(PVOID p, RelativeFixupPointer<TypeHandle> * pth) |
1449 | { |
1450 | FixupTypeHandlePointer(pth->GetValueMaybeNull(), p, (BYTE *)pth - (BYTE *)p, IMAGE_REL_BASED_RELPTR); |
1451 | } |
1452 | void DataImage::FixupMethodDescPointer(PVOID p, RelativeFixupPointer<PTR_MethodDesc> * ppMD) |
1453 | { |
1454 | FixupMethodDescPointer(ppMD->GetValueMaybeNull(), p, (BYTE *)ppMD - (BYTE *)p, IMAGE_REL_BASED_RELPTR); |
1455 | } |
1456 | void DataImage::FixupFieldDescPointer(PVOID p, RelativeFixupPointer<PTR_FieldDesc> * ppFD) |
1457 | { |
1458 | FixupFieldDescPointer(ppFD->GetValueMaybeNull(), p, (BYTE *)ppFD - (BYTE *)p, IMAGE_REL_BASED_RELPTR); |
1459 | } |
1460 | |
1461 | BOOL DataImage::CanHardBindToZapModule(Module *targetModule) |
1462 | { |
1463 | STANDARD_VM_CONTRACT; |
1464 | |
1465 | _ASSERTE(targetModule == m_module || targetModule->HasNativeImage()); |
1466 | return targetModule == m_module; |
1467 | } |
1468 | |
1469 | BOOL DataImage::CanEagerBindToTypeHandle(TypeHandle th, BOOL fRequirePrerestore, TypeHandleList *pVisited) |
1470 | { |
1471 | STANDARD_VM_CONTRACT; |
1472 | |
1473 | Module * pLoaderModule = th.GetLoaderModule(); |
1474 | |
1475 | BOOL fCanEagerBind; |
1476 | |
1477 | if (th.IsTypeDesc()) |
1478 | { |
1479 | fCanEagerBind = CanEagerBindTo(pLoaderModule, Module::GetPreferredZapModuleForTypeDesc(th.AsTypeDesc()), th.AsTypeDesc()); |
1480 | } |
1481 | else |
1482 | { |
1483 | fCanEagerBind = CanEagerBindTo(pLoaderModule, Module::GetPreferredZapModuleForMethodTable(th.AsMethodTable()), th.AsMethodTable()); |
1484 | } |
1485 | |
1486 | if (GetModule() != th.GetLoaderModule()) |
1487 | { |
1488 | if (th.IsTypeDesc()) |
1489 | { |
1490 | return FALSE; |
1491 | } |
1492 | |
1493 | // As a performance optimization, don't eager bind to arrays. They are currently very expensive to |
1494 | // fixup so we want to do it lazily. |
1495 | |
1496 | if (th.AsMethodTable()->IsArray()) |
1497 | { |
1498 | return FALSE; |
1499 | } |
1500 | |
1501 | // For correctness in the face of targeted patching, do not eager bind to any instantiation |
1502 | // in the target module that might go away. |
1503 | if (!th.IsTypicalTypeDefinition() && |
1504 | !Module::IsAlwaysSavedInPreferredZapModule(th.GetInstantiation(), |
1505 | Instantiation())) |
1506 | { |
1507 | return FALSE; |
1508 | } |
1509 | |
1510 | // #DoNotEagerBindToTypesThatNeedRestore |
1511 | // |
1512 | // It is important to avoid eager binding to structures that require restore. The code here stops |
1513 | // this from happening for cross-module fixups. For intra-module cases, eager fixups are allowed to |
1514 | // (and often do) target types that require restore, even though this is generally prone to all of |
1515 | // the same problems described below. Correctness is preserved only because intra-module eager |
1516 | // fixups are ignored in Module::RunEagerFixups (so their semantics are very close to normal |
1517 | // non-eager fixups). |
1518 | // |
1519 | // For performance, this is the most costly type of eager fixup (and may require otherwise-unneeded |
1520 | // assemblies to be loaded) and has the lowest benefit, since it does not avoid the need for the |
1521 | // referencing type to require restore. |
1522 | // |
1523 | // More importantly, this kind of fixup can compromise correctness by causing type loads to occur |
1524 | // during eager fixup resolution. The system is not designed to cope with this and a variety of |
1525 | // subtle failures can occur when it happens. As an example, consider a scenario involving the |
1526 | // following assemblies and types: |
1527 | // o A1: softbinds to A2, contains "class A1!Level2 extends A2!Level1" |
1528 | // o A2: hardbinds to A3, contains "class A2!Level1 extends Object", contains methods that use A3!Level3. |
1529 | // o A3: softbinds to A1, contains "class A3!Level3 extends A1!Level2" |
1530 | // |
1531 | // If eager fixups are allowed to target types that need restore, then it's possible for A2 to end |
1532 | // up with an eager fixup targeting A3!Level3, setting up this sequence: |
1533 | // 1 Type load starts for A1!Level2. |
1534 | // 2 Loading base class A2!Level1 triggers assembly load for A2. |
1535 | // 3 Loading A2 involves synchronously resolving its eager fixups, including the fixup to A3!Level3. |
1536 | // 4 A3!Level3 needs restore, so type load starts for A3!Level3. |
1537 | // 5 Loading A3!Level3 requires loading base class A1!Level2. |
1538 | // 6 A1!Level2 is already being loaded on this thread (in #1 above), so type load fails. |
1539 | // 7 Since eager fixup resolution failed, FileLoadException is thrown for A2. |
1540 | fRequirePrerestore = TRUE; |
1541 | } |
1542 | |
1543 | if (fCanEagerBind && fRequirePrerestore) |
1544 | { |
1545 | fCanEagerBind = !th.ComputeNeedsRestore(this, pVisited); |
1546 | } |
1547 | |
1548 | return fCanEagerBind; |
1549 | } |
1550 | |
1551 | BOOL DataImage::CanEagerBindToMethodTable(MethodTable *pMT, BOOL fRequirePrerestore, TypeHandleList *pVisited) |
1552 | { |
1553 | WRAPPER_NO_CONTRACT; |
1554 | |
1555 | TypeHandle th = TypeHandle(pMT); |
1556 | return DataImage::CanEagerBindToTypeHandle(th, fRequirePrerestore, pVisited); |
1557 | } |
1558 | |
1559 | BOOL DataImage::CanEagerBindToMethodDesc(MethodDesc *pMD, BOOL fRequirePrerestore, TypeHandleList *pVisited) |
1560 | { |
1561 | STANDARD_VM_CONTRACT; |
1562 | |
1563 | BOOL fCanEagerBind = CanEagerBindTo(pMD->GetLoaderModule(), Module::GetPreferredZapModuleForMethodDesc(pMD), pMD); |
1564 | |
1565 | // Performance optimization -- see comment in CanEagerBindToTypeHandle |
1566 | if (GetModule() != pMD->GetLoaderModule()) |
1567 | { |
1568 | // For correctness in the face of targeted patching, do not eager bind to any instantiation |
1569 | // in the target module that might go away. |
1570 | if (!pMD->IsTypicalMethodDefinition() && |
1571 | !Module::IsAlwaysSavedInPreferredZapModule(pMD->GetClassInstantiation(), |
1572 | pMD->GetMethodInstantiation())) |
1573 | { |
1574 | return FALSE; |
1575 | } |
1576 | |
1577 | fRequirePrerestore = TRUE; |
1578 | } |
1579 | |
1580 | if (fCanEagerBind && fRequirePrerestore) |
1581 | { |
1582 | fCanEagerBind = !pMD->ComputeNeedsRestore(this, pVisited); |
1583 | } |
1584 | |
1585 | return fCanEagerBind; |
1586 | } |
1587 | |
1588 | BOOL DataImage::CanEagerBindToFieldDesc(FieldDesc *pFD, BOOL fRequirePrerestore, TypeHandleList *pVisited) |
1589 | { |
1590 | STANDARD_VM_CONTRACT; |
1591 | |
1592 | if (!CanEagerBindTo(pFD->GetLoaderModule(), Module::GetPreferredZapModuleForFieldDesc(pFD), pFD)) |
1593 | return FALSE; |
1594 | |
1595 | MethodTable * pMT = pFD->GetApproxEnclosingMethodTable(); |
1596 | |
1597 | return CanEagerBindToMethodTable(pMT, fRequirePrerestore, pVisited); |
1598 | } |
1599 | |
1600 | BOOL DataImage::CanEagerBindToModule(Module *pModule) |
1601 | { |
1602 | STANDARD_VM_CONTRACT; |
1603 | |
1604 | return GetAppDomain()->ToCompilationDomain()->CanEagerBindToZapFile(pModule); |
1605 | } |
1606 | |
1607 | // "address" is a data-structure belonging to pTargetModule. |
1608 | // This function returns whether the Module currently being ngenned can |
1609 | // hardbind "address" |
1610 | /* static */ |
1611 | BOOL DataImage::CanEagerBindTo(Module *pTargetModule, Module *pPreferredZapModule, void *address) |
1612 | { |
1613 | STANDARD_VM_CONTRACT; |
1614 | |
1615 | if (pTargetModule != pPreferredZapModule) |
1616 | return FALSE; |
1617 | |
1618 | if (GetModule() == pTargetModule) |
1619 | return TRUE; |
1620 | |
1621 | BOOL eagerBindToZap = GetAppDomain()->ToCompilationDomain()->CanEagerBindToZapFile(pTargetModule); |
1622 | BOOL isPersisted = pTargetModule->IsPersistedObject(address); |
1623 | |
1624 | return eagerBindToZap && isPersisted; |
1625 | } |
1626 | |
1627 | BOOL DataImage::CanPrerestoreEagerBindToTypeHandle(TypeHandle th, TypeHandleList *pVisited) |
1628 | { |
1629 | WRAPPER_NO_CONTRACT; |
1630 | return CanEagerBindToTypeHandle(th, TRUE, pVisited); |
1631 | } |
1632 | |
1633 | BOOL DataImage::CanPrerestoreEagerBindToMethodTable(MethodTable *pMT, TypeHandleList *pVisited) |
1634 | { |
1635 | WRAPPER_NO_CONTRACT; |
1636 | return CanEagerBindToMethodTable(pMT, TRUE, pVisited); |
1637 | } |
1638 | |
1639 | BOOL DataImage::CanPrerestoreEagerBindToMethodDesc(MethodDesc *pMD, TypeHandleList *pVisited) |
1640 | { |
1641 | WRAPPER_NO_CONTRACT; |
1642 | return CanEagerBindToMethodDesc(pMD, TRUE, pVisited); |
1643 | } |
1644 | |
1645 | |
1646 | void DataImage::HardBindTypeHandlePointer(PVOID p, SSIZE_T offset) |
1647 | { |
1648 | CONTRACTL |
1649 | { |
1650 | STANDARD_VM_CHECK; |
1651 | PRECONDITION(CanEagerBindToTypeHandle(*(TypeHandle UNALIGNED*)((BYTE *)p + offset))); |
1652 | } |
1653 | CONTRACTL_END; |
1654 | |
1655 | TypeHandle thCopy = *(TypeHandle UNALIGNED*)((BYTE *)p + offset); |
1656 | |
1657 | if (!thCopy.IsNull()) |
1658 | { |
1659 | if (thCopy.IsTypeDesc()) |
1660 | { |
1661 | FixupField(p, offset, thCopy.AsTypeDesc(), 2); |
1662 | } |
1663 | else |
1664 | { |
1665 | FixupField(p, offset, thCopy.AsMethodTable()); |
1666 | } |
1667 | } |
1668 | } |
1669 | |
1670 | |
1671 | // This is obsolete in-place fixup that we should get rid of. For now, it is used for: |
1672 | // - FnPtrTypeDescs. These should not be stored in NGen images at all. |
1673 | // - stubs-as-il signatures. These should use tokens when stored in NGen image. |
1674 | // |
1675 | void DataImage::FixupTypeHandlePointerInPlace(PVOID p, SSIZE_T offset, BOOL fForceFixup /*=FALSE*/) |
1676 | { |
1677 | STANDARD_VM_CONTRACT; |
1678 | |
1679 | TypeHandle thCopy = *(TypeHandle UNALIGNED*)((BYTE *)p + offset); |
1680 | |
1681 | if (!thCopy.IsNull()) |
1682 | { |
1683 | if (!fForceFixup && |
1684 | CanEagerBindToTypeHandle(thCopy) && |
1685 | CanHardBindToZapModule(thCopy.GetLoaderModule())) |
1686 | { |
1687 | HardBindTypeHandlePointer(p, offset); |
1688 | } |
1689 | else |
1690 | { |
1691 | ZapImport * pImport = m_pZapImage->GetImportTable()->GetClassHandleImport((CORINFO_CLASS_HANDLE)thCopy.AsPtr()); |
1692 | |
1693 | ZapNode * pBlob = m_pZapImage->GetImportTable()->PlaceImportBlob(pImport); |
1694 | FixupFieldToNode(p, offset, pBlob, 0, IMAGE_REL_BASED_ABSOLUTE_TAGGED); |
1695 | } |
1696 | } |
1697 | } |
1698 | |
1699 | void DataImage::BeginRegion(CorInfoRegionKind regionKind) |
1700 | { |
1701 | STANDARD_VM_CONTRACT; |
1702 | |
1703 | m_pZapImage->BeginRegion(regionKind); |
1704 | } |
1705 | |
1706 | void DataImage::EndRegion(CorInfoRegionKind regionKind) |
1707 | { |
1708 | STANDARD_VM_CONTRACT; |
1709 | |
1710 | m_pZapImage->EndRegion(regionKind); |
1711 | } |
1712 | |
1713 | void DataImage::ReportInlining(CORINFO_METHOD_HANDLE inliner, CORINFO_METHOD_HANDLE inlinee) |
1714 | { |
1715 | STANDARD_VM_CONTRACT; |
1716 | _ASSERTE(m_inlineTrackingMap); |
1717 | m_inlineTrackingMap->AddInlining(GetMethod(inliner), GetMethod(inlinee)); |
1718 | } |
1719 | |
1720 | InlineTrackingMap * DataImage::GetInlineTrackingMap() |
1721 | { |
1722 | LIMITED_METHOD_DAC_CONTRACT; |
1723 | return m_inlineTrackingMap; |
1724 | } |
1725 | |
1726 | // |
1727 | // Compressed LookupMap Support |
1728 | // |
1729 | // See the large comment near the top of ceeload.h for a much more detailed discussion of this. |
1730 | // |
1731 | // Basically we support a specialized node, ZapCompressedLookupMap, which knows how to compress the array of |
1732 | // intra-module pointers present in certain types of LookupMap. |
1733 | // |
1734 | |
1735 | // A simple class to write a sequential sequence of variable sized bit-fields into a pre-allocated buffer. I |
1736 | // was going to use the version defined by GcInfoEncoder (the reader side in ceeload.cpp uses GcInfoDecoder's |
1737 | // BitStreamReader) but unfortunately the code is not currently factored to make this easy and the resources |
1738 | // were not available to perform a non-trivial refactorization of the code. In any event the writer is fairly |
1739 | // trivial and doesn't represent a huge duplication of effort. |
1740 | // The class requires that the input buffer is DWORD-aligned and sized (it uses a DWORD cache and always |
1741 | // writes data to the buffer in DWORD-sized chunks). |
1742 | class BitStreamWriter |
1743 | { |
1744 | public: |
1745 | // Initialize a writer and point it at the start of a pre-allocated buffer (large enough to accomodate all |
1746 | // future writes). The buffer must be DWORD-aligned (we use this for some performance optimization). |
1747 | BitStreamWriter(DWORD *pStart) |
1748 | { |
1749 | LIMITED_METHOD_CONTRACT; |
1750 | |
1751 | // Buffer must be DWORD-aligned. |
1752 | _ASSERTE(((TADDR)pStart & 0x3) == 0); |
1753 | |
1754 | m_pNext = pStart; // Point at the start of the buffer |
1755 | m_dwCurrent = 0; // We don't have any cached data waiting to write |
1756 | m_cCurrentBits = 0; // Ditto |
1757 | m_cBitsWritten = 0; // We haven't written any bits |
1758 | } |
1759 | |
1760 | // Write the low-order cBits of dwData to the stream. |
1761 | void Write(DWORD dwData, DWORD cBits) |
1762 | { |
1763 | LIMITED_METHOD_CONTRACT; |
1764 | |
1765 | // We can only write between 1 and 32 bits of data at a time. |
1766 | _ASSERTE(cBits > 0 && cBits <= kBitsPerDWORD); |
1767 | |
1768 | // Check that none of the unused high-order bits of dwData have stale data in them (we can use this to |
1769 | // optimize paths below). Use two conditions here because << of 32-bits or more (on x86) doesn't |
1770 | // do what you might expect (the RHS is modulo 32 so "<< 32" is a no-op rather than zero-ing the |
1771 | // result). |
1772 | _ASSERTE((cBits == kBitsPerDWORD) || ((dwData & ((1U << cBits) - 1)) == dwData)); |
1773 | |
1774 | // Record the input bits as written (we can't fail and we have multiple exit paths below so it's |
1775 | // convenient to update our counter here). |
1776 | m_cBitsWritten += cBits; |
1777 | |
1778 | // We cache up to a DWORD of data to be written to the stream and only write back to the buffer when |
1779 | // we have a full DWORD. Calculate how many bits of the input we're going to write first (either the |
1780 | // rest of the input or the remaining bits of space in the current DWORD cache, whichever is smaller). |
1781 | DWORD cInitialBits = min(cBits, kBitsPerDWORD - m_cCurrentBits); |
1782 | if (cInitialBits == kBitsPerDWORD) |
1783 | { |
1784 | // Deal with this special case (we're writing all the input, an entire DWORD all at once) since it |
1785 | // ensures that none of the << operations below have to deal with a LHS that == 32 (see the << |
1786 | // comment in one of the asserts above for why this matters). |
1787 | |
1788 | // Because of the calculations above we should only come here if our DWORD cache was empty and the |
1789 | // caller is trying to write a full DWORD (which simplifies many things). |
1790 | _ASSERTE(m_dwCurrent == 0 && m_cCurrentBits == 0 && cBits == kBitsPerDWORD); |
1791 | |
1792 | *m_pNext++ = dwData; // Write a full DWORD directly from the input |
1793 | |
1794 | // That's it, there's no more data to write and the only state update to the write was advancing |
1795 | // the buffer pointer (cache DWORD is already in the correct state, see asserts above). |
1796 | return; |
1797 | } |
1798 | |
1799 | // Calculate a mask of the low-order bits we're going to extract from the input data. |
1800 | DWORD dwInitialMask = (1U << cInitialBits) - 1; |
1801 | |
1802 | // OR those bits into the cache (properly shifted to fit above the data already there). |
1803 | m_dwCurrent |= (dwData & dwInitialMask) << m_cCurrentBits; |
1804 | |
1805 | // Update the cache bit counter for the new data. |
1806 | m_cCurrentBits += cInitialBits; |
1807 | if (m_cCurrentBits == kBitsPerDWORD) |
1808 | { |
1809 | // The cache filled up. Write the DWORD to the buffer and reset the cache state to empty. |
1810 | *m_pNext++ = m_dwCurrent; |
1811 | m_dwCurrent = 0; |
1812 | m_cCurrentBits = 0; |
1813 | } |
1814 | |
1815 | // If the bits we just inserted comprised all the input bits we're done. |
1816 | if (cInitialBits == cBits) |
1817 | return; |
1818 | |
1819 | // There's more data to write. But we can only get here if we just flushed the cache. So there is a |
1820 | // whole DWORD free in the cache and we're guaranteed to have less than a DWORD of data left to write. |
1821 | // As a result we can simply populate the low-order bits of the cache with our remaining data (simply |
1822 | // shift down by the number of bits we've already written) and we're done. |
1823 | _ASSERTE(m_dwCurrent == 0 && m_cCurrentBits == 0); |
1824 | m_dwCurrent = dwData >>= cInitialBits; |
1825 | m_cCurrentBits = cBits - cInitialBits; |
1826 | } |
1827 | |
1828 | // Because we cache a DWORD of data before writing it it's possible that there are still unwritten bits |
1829 | // left in the cache once you've finished writing data. Call this operation after all Writes() are |
1830 | // completed to flush any such data to memory. It's not legal to call Write() again after a Flush(). |
1831 | void Flush() |
1832 | { |
1833 | LIMITED_METHOD_CONTRACT; |
1834 | |
1835 | // Nothing to do if the cache is empty. |
1836 | if (m_cCurrentBits == 0) |
1837 | return; |
1838 | |
1839 | // Write what we have to memory (unused high-order bits will be zero). |
1840 | *m_pNext = m_dwCurrent; |
1841 | |
1842 | // Catch any attempt to make a further Write() call. |
1843 | m_pNext = NULL; |
1844 | } |
1845 | |
1846 | // Get the count of bits written so far (logically, this number does not take caching into account). |
1847 | DWORD GetBitsWritten() |
1848 | { |
1849 | LIMITED_METHOD_CONTRACT; |
1850 | |
1851 | return m_cBitsWritten; |
1852 | } |
1853 | |
1854 | private: |
1855 | enum { kBitsPerDWORD = sizeof(DWORD) * 8 }; |
1856 | |
1857 | DWORD *m_pNext; // Pointer to the next DWORD that will be written in the buffer |
1858 | DWORD m_dwCurrent; // We cache up to a DWORD of data before writing it to the buffer |
1859 | DWORD m_cCurrentBits; // Count of valid (low-order) bits in the buffer above |
1860 | DWORD m_cBitsWritten; // Count of bits given to Write() (ignores caching) |
1861 | }; |
1862 | |
1863 | // A specialized node used to write the compressed portions of a LookupMap to an ngen image. This is |
1864 | // (optionally) allocated by a call to DataImage::StoreCompressedLayoutMap from LookupMapBase::Save() and |
1865 | // handles allocation and initialization of the compressed table and an index used to navigate the table |
1866 | // efficiently. The allocation of the map itself and any hot item list is still handled externally but this |
1867 | // node will perform any fixups in the base map required to refer to the new compressed data. |
1868 | // |
1869 | // Since the compression algorithm used depends on the precise values of the RVAs referenced by the LookupMap |
1870 | // the compression doesn't happen until ComputeRVA is called (don't call GetSize() until after ComputeRVA() |
1871 | // returns). Additionally we must ensure that this node's ComputeRVA() is not called until after that of every |
1872 | // node on those RVA it depends. Currently this is ensured by placing this node near the end of the .text |
1873 | // section (after pointers to any read-only data structures referenced by LookupMaps and after the .data |
1874 | // section containing writeable structures). |
1875 | class ZapCompressedLookupMap : public ZapNode |
1876 | { |
1877 | DataImage *m_pImage; // Back pointer to the allocating DataImage |
1878 | LookupMapBase *m_pMap; // Back pointer to the LookupMap we're compressing |
1879 | BYTE *m_pTable; // ComputeRVA allocates a compressed table here |
1880 | BYTE *m_pIndex; // ComputeRVA allocates a table index here |
1881 | DWORD m_cbTable; // Size (in bytes) of the table above (after ComputeRVA) |
1882 | DWORD m_cbIndex; // Size (in bytes) of the index above (after ComputeRVA) |
1883 | DWORD m_cBitsPerIndexEntry; // Number of bits in each index entry |
1884 | DWORD m_rgHistogram[kBitsPerRVA]; // Table of frequencies of different delta lengths |
1885 | BYTE m_rgEncodingLengths[kLookupMapLengthEntries]; // Table of different bit lengths value deltas can take |
1886 | BYTE m_eKind; // Item kind (DataImage::ITEM_COMPRESSED_MAP currently) |
1887 | |
1888 | public: |
1889 | ZapCompressedLookupMap(DataImage *pImage, LookupMapBase *pMap, BYTE eKind) |
1890 | : m_pImage(pImage), m_pMap(pMap), m_eKind(eKind) |
1891 | { |
1892 | LIMITED_METHOD_CONTRACT; |
1893 | } |
1894 | |
1895 | DataImage::ItemKind GetKind() |
1896 | { |
1897 | LIMITED_METHOD_CONTRACT; |
1898 | |
1899 | return (DataImage::ItemKind)m_eKind; |
1900 | } |
1901 | |
1902 | virtual DWORD GetSize() |
1903 | { |
1904 | LIMITED_METHOD_CONTRACT; |
1905 | |
1906 | if (!ShouldCompressedMapBeSaved()) |
1907 | return 0; |
1908 | |
1909 | // This isn't legal until ComputeRVA() is called. Check this by seeing if the compressed version of |
1910 | // the table is allocated yet. |
1911 | _ASSERTE(m_pTable != NULL); |
1912 | return m_cbIndex + m_cbTable; |
1913 | } |
1914 | |
1915 | virtual UINT GetAlignment() |
1916 | { |
1917 | LIMITED_METHOD_CONTRACT; |
1918 | |
1919 | if (!ShouldCompressedMapBeSaved()) |
1920 | return 1; |
1921 | |
1922 | // The table and index have no pointers but do require DWORD alignment. |
1923 | return sizeof(DWORD); |
1924 | } |
1925 | |
1926 | virtual ZapNodeType GetType() |
1927 | { |
1928 | STANDARD_VM_CONTRACT; |
1929 | |
1930 | return NodeTypeForItemKind(m_eKind); |
1931 | } |
1932 | |
1933 | virtual DWORD ComputeRVA(ZapWriter *pZapWriter, DWORD dwPos) |
1934 | { |
1935 | STANDARD_VM_CONTRACT; |
1936 | |
1937 | if (ShouldCompressedMapBeSaved()) |
1938 | { |
1939 | |
1940 | // This is the earliest opportunity at which all data is available in order to compress the table. In |
1941 | // particular all values in the table (currently MethodTable* or MethodDesc*) point to structures |
1942 | // which have been assigned final RVAs in the image. We can thus compute a compressed table value that |
1943 | // relies on the relationship between these RVAs. |
1944 | |
1945 | // Phase 1: Look through all the entries in the table. Look at the deltas between RVAs for adjacent |
1946 | // items and build a histogram of how many entries require a specific number to encode their delta |
1947 | // (using a scheme we we discard non-significant low and high-order zero bits). This call will |
1948 | // initialize m_rgHistogram so that entry 0 contains the number of entries that require 1 bit to |
1949 | // encode their delta, entry 1 the count of those that require 2 bits etc. up to the last entry (how |
1950 | // many entries require the full 32 bits). Note that even on 64-bit platforms we only currently |
1951 | // support 32-bit RVAs. |
1952 | DWORD cRids = AnalyzeTable(); |
1953 | |
1954 | // Phase 2: Given the histogram above, calculate the set of delta lengths for the encoding table |
1955 | // (m_rgEncodingLengths) that will result in optimal table size. We have a fixed size encoding length |
1956 | // so we don't have to embed a large fixed-size length field for every compressed entry but we can |
1957 | // still cope with the relatively rare but ever-present worst case entries which require many bits of |
1958 | // delta entry. |
1959 | OptimizeEncodingLengths(); |
1960 | |
1961 | // Phase 3: We now have enough data to allocate the final data structures (the compressed table itself |
1962 | // and an index that bookmarks every kLookupMapIndexStride'th entry). Both structures must start |
1963 | // DWORD-aligned and have a DWORD-aligned size (requirements of BitStreamWriter). |
1964 | |
1965 | // PredictCompressedSize() returns its result in bits so we must convert (rounding up) to bytes before |
1966 | // DWORD aligning. |
1967 | m_cbTable = AlignUp((PredictCompressedSize(m_rgEncodingLengths) + 7) / 8, sizeof(DWORD)); |
1968 | |
1969 | // Each index entry contains a bit offset into the compressed stream (so we must size for the worst |
1970 | // case of an offset at the end of the stream) plus an RVA. |
1971 | m_cBitsPerIndexEntry = BitsRequired(m_cbTable * 8) + kBitsPerRVA; |
1972 | _ASSERTE(m_cBitsPerIndexEntry > 0); |
1973 | |
1974 | // Our first index entry is for entry 0 (rather than entry kLookupMapIndexStride) so we must be |
1975 | // sure to round up the number of index entries we need in order to cover the table. |
1976 | DWORD cIndexEntries = (cRids + (kLookupMapIndexStride - 1)) / kLookupMapIndexStride; |
1977 | |
1978 | // Since we calculate the index size in bits we need to round up to bytes before DWORD aligning. |
1979 | m_cbIndex = AlignUp(((m_cBitsPerIndexEntry * cIndexEntries) + 7) / 8, sizeof(DWORD)); |
1980 | |
1981 | // Allocate both table and index from a single chunk of memory. |
1982 | BYTE *pMemory = new BYTE[m_cbIndex + m_cbTable]; |
1983 | m_pTable = pMemory; |
1984 | m_pIndex = pMemory + m_cbTable; |
1985 | |
1986 | // Phase 4: We've now calculated all the input data we need and allocated memory for the output so we |
1987 | // can go ahead and fill in the compressed table and index. |
1988 | InitializeTableAndIndex(); |
1989 | |
1990 | // Phase 5: Go back up update the saved version of the LookupMap (redirect the table pointer to the |
1991 | // compressed table and fill in the other fields which aren't valid until the table is compressed). |
1992 | LookupMapBase *pSaveMap = (LookupMapBase*)m_pImage->GetImagePointer(m_pMap); |
1993 | pSaveMap->pTable = (TADDR*)m_pTable; |
1994 | pSaveMap->pIndex = m_pIndex; |
1995 | pSaveMap->cIndexEntryBits = m_cBitsPerIndexEntry; |
1996 | pSaveMap->cbTable = m_cbTable; |
1997 | pSaveMap->cbIndex = m_cbIndex; |
1998 | memcpy(pSaveMap->rgEncodingLengths, m_rgEncodingLengths, sizeof(m_rgEncodingLengths)); |
1999 | |
2000 | // Schedule fixups for the map pointers to the compressed table and index. |
2001 | m_pImage->FixupFieldToNode(m_pMap, offsetof(LookupMapBase, pTable), this, 0); |
2002 | m_pImage->FixupFieldToNode(m_pMap, offsetof(LookupMapBase, pIndex), this, m_cbTable); |
2003 | } |
2004 | |
2005 | // We're done with generating the compressed table. Now we need to do the work ComputeRVA() is meant |
2006 | // to do: |
2007 | dwPos = AlignUp(dwPos, GetAlignment()); // Satisfy our alignment requirements |
2008 | SetRVA(dwPos); // Set the RVA of the node (both table and index) |
2009 | dwPos += GetSize(); // Advance the RVA past our node |
2010 | |
2011 | return dwPos; |
2012 | } |
2013 | |
2014 | virtual void Save(ZapWriter *pZapWriter) |
2015 | { |
2016 | STANDARD_VM_CONTRACT; |
2017 | |
2018 | if (!ShouldCompressedMapBeSaved()) |
2019 | return; |
2020 | |
2021 | // Save both the table and index. |
2022 | pZapWriter->Write(m_pTable, m_cbTable); |
2023 | pZapWriter->Write(m_pIndex, m_cbIndex); |
2024 | } |
2025 | |
2026 | private: |
2027 | |
2028 | // It's possible that our node has been created and only later the decision is made to store the full |
2029 | // uncompressed table. In this case, we want to early out of our work and make saving our node a no-op. |
2030 | BOOL ShouldCompressedMapBeSaved() |
2031 | { |
2032 | LIMITED_METHOD_CONTRACT; |
2033 | |
2034 | // To identify whether compression is desired, use the flag from LookupMapBase::Save |
2035 | return (m_pMap->cIndexEntryBits > 0); |
2036 | } |
2037 | |
2038 | // Phase 1: Look through all the entries in the table. Look at the deltas between RVAs for adjacent items |
2039 | // and build a histogram of how many entries require a specific number to encode their delta (using a |
2040 | // scheme we we discard non-significant low and high-order zero bits). This call will initialize |
2041 | // m_rgHistogram so that entry 0 contains the number of entries that require 1 bit to encode their delta, |
2042 | // entry 1 the count of those that require 2 bits etc. up to the last entry (how many entries require the |
2043 | // full 32 bits). Note that even on 64-bit platforms we only currently support 32-bit RVAs. |
2044 | DWORD AnalyzeTable() |
2045 | { |
2046 | STANDARD_VM_CONTRACT; |
2047 | |
2048 | LookupMapBase *pMap = m_pMap; |
2049 | DWORD dwLastValue = 0; |
2050 | DWORD cRids = 0; |
2051 | |
2052 | // Initialize the histogram to all zeroes. |
2053 | memset(m_rgHistogram, 0, sizeof(m_rgHistogram)); |
2054 | |
2055 | // Walk each node in the map. |
2056 | while (pMap) |
2057 | { |
2058 | // Walk each entry in this node. |
2059 | for (DWORD i = 0; i < pMap->dwCount; i++) |
2060 | { |
2061 | DWORD dwCurrentValue = ComputeElementRVA(pMap, i); |
2062 | |
2063 | // Calculate the delta from the last entry. We split the delta into two-components: a bool |
2064 | // indicating whether the RVA was higher or lower and an absolute (non-negative) size. Sort of |
2065 | // like a ones-complement signed number. |
2066 | bool fIncreasingDelta = dwCurrentValue > dwLastValue; |
2067 | DWORD dwDelta = fIncreasingDelta ? (dwCurrentValue - dwLastValue) : (dwLastValue - dwCurrentValue); |
2068 | |
2069 | // Determine the minimum number of bits required to represent the delta (by stripping |
2070 | // non-significant leading zeros) and update the count in the histogram of the number of |
2071 | // deltas that required this many bits. We never encode anything with zero bits (only the |
2072 | // value zero would be eligibil and it's not a common value) so the first histogram entry |
2073 | // records the number of deltas encodable with one bit and so on. |
2074 | m_rgHistogram[BitsRequired(dwDelta) - 1]++; |
2075 | |
2076 | dwLastValue = dwCurrentValue; |
2077 | cRids++; |
2078 | } |
2079 | |
2080 | pMap = pMap->pNext; |
2081 | } |
2082 | |
2083 | return cRids; |
2084 | } |
2085 | |
2086 | // Phase 2: Given the histogram above, calculate the set of delta lengths for the encoding table |
2087 | // (m_rgEncodingLengths) that will result in optimal table size. We have a fixed size encoding length so |
2088 | // we don't have to embed a large fixed-size length field for every compressed entry but we can still cope |
2089 | // with the relatively rare but ever-present worst case entries which require many bits of delta entry. |
2090 | void OptimizeEncodingLengths() |
2091 | { |
2092 | STANDARD_VM_CONTRACT; |
2093 | |
2094 | // Find the longest delta (search from the large end of the histogram down for the first non-zero |
2095 | // entry). |
2096 | BYTE bMaxBits = 0; |
2097 | #ifdef _MSC_VER |
2098 | #pragma warning(suppress:6293) // Prefast doesn't understand the unsigned modulo-8 arithmetic below. |
2099 | #endif |
2100 | for (BYTE i = kBitsPerRVA - 1; i < 0xff; i--) |
2101 | if (m_rgHistogram[i] > 0) |
2102 | { |
2103 | bMaxBits = i + 1; // +1 because we never encode anything with zero bits. |
2104 | break; |
2105 | } |
2106 | _ASSERTE(bMaxBits >= 1); |
2107 | |
2108 | // Now find the smallest delta in a similar fashion. |
2109 | BYTE bMinBits = bMaxBits; |
2110 | for (BYTE i = 0; i < kBitsPerRVA; i++) |
2111 | if (m_rgHistogram[i] > 0) |
2112 | { |
2113 | bMinBits = i + 1; // +1 because we never encode anything with zero bits. |
2114 | break; |
2115 | } |
2116 | _ASSERTE(bMinBits <= bMaxBits); |
2117 | |
2118 | // The encoding lengths table is a sorted list of bit field lengths we can use to encode any |
2119 | // entry-to-entry delta in the compressed table. We go through a table so we can use a small number of |
2120 | // bits in the compressed stream (the table index) to express a very flexible range of deltas. The one |
2121 | // entry we know in advance is the largest (the last). That's because we know we have to be able to |
2122 | // encode the largest delta we found in the table or else we couldn't be functionally correct. |
2123 | m_rgEncodingLengths[kLookupMapLengthEntries - 1] = bMaxBits; |
2124 | |
2125 | // Now find optimal values for the other entries one by one. It doesn't really matter which order we |
2126 | // do them in. For each entry we'll loop through all the possible encoding lengths, dwMinBits <= |
2127 | // length < dwMaxBits, setting all the uninitialized entries to the candidate value and calculating |
2128 | // the resulting compressed size of the table. We don't enforce that the candidate sizes get smaller |
2129 | // for each entry so in that if the best use of an extra table entry is to add a larger length rather |
2130 | // than a smaller one then we'll take that. The downside is that we have to sort the table before |
2131 | // calculating the table size (the sizing algorithm is only fast for a sorted table). Luckily our |
2132 | // table is very small (currently 4 entries) and we don't have to sort one of the entries (the last is |
2133 | // always largest) so this isn't such a huge deal. |
2134 | for (DWORD i = 0; i < kLookupMapLengthEntries - 1; i++) |
2135 | { |
2136 | DWORD dwBestSize = 0xffffffff; // Best overall table size so far |
2137 | BYTE bBestLength = bMaxBits; // The candidate value that lead to the above |
2138 | |
2139 | // Iterate over all the values that could generate a good result (no point trying values smaller |
2140 | // than the smallest delta we have or as large as the maximum table entry we've already fixed). |
2141 | for (BYTE j = bMinBits; j < bMaxBits; j++) |
2142 | { |
2143 | // Build a temporary (unsorted) encoding table. |
2144 | BYTE rgTempBuckets[kLookupMapLengthEntries]; |
2145 | |
2146 | // Entries before the current one are set to the values we've already determined in previous |
2147 | // iterations. |
2148 | for (DWORD k = 0; k < i; k++) |
2149 | rgTempBuckets[k] = m_rgEncodingLengths[k]; |
2150 | |
2151 | // The current entry and the remaining uninitialized entries are all set to the current |
2152 | // candidate value (this is logically the equivalent of removing the non-current uninitialized |
2153 | // entries from the table altogether). |
2154 | for (DWORD k = i; k < kLookupMapLengthEntries - 1; k++) |
2155 | rgTempBuckets[k] = j; |
2156 | |
2157 | // The last entry is always the maximum bit length. |
2158 | rgTempBuckets[kLookupMapLengthEntries - 1] = bMaxBits; |
2159 | |
2160 | // Sort the temporary table so that the call to PredictCompressedSize() below behaves |
2161 | // correctly (and fast). |
2162 | SortLengthBuckets(rgTempBuckets); |
2163 | |
2164 | // See what size of table this would generate. |
2165 | DWORD dwTestSize = PredictCompressedSize(rgTempBuckets); |
2166 | if (dwTestSize < dwBestSize) |
2167 | { |
2168 | // The result is better than our current best, remember it. |
2169 | dwBestSize = dwTestSize; |
2170 | bBestLength = j; |
2171 | } |
2172 | } |
2173 | |
2174 | // Set the current entry to the best length we found. |
2175 | m_rgEncodingLengths[i] = bBestLength; |
2176 | } |
2177 | |
2178 | // We've picked optimal values for all entries, but the result is unsorted. Fix that now. |
2179 | SortLengthBuckets(m_rgEncodingLengths); |
2180 | } |
2181 | |
2182 | // Phase 4: We've now calculated all the input data we need and allocated memory for the output so we can |
2183 | // go ahead and fill in the compressed table and index. |
2184 | void InitializeTableAndIndex() |
2185 | { |
2186 | STANDARD_VM_CONTRACT; |
2187 | |
2188 | // Initialize bit stream writers to the start of the compressed table and index. |
2189 | BitStreamWriter sTableStream((DWORD*)m_pTable); |
2190 | BitStreamWriter sIndexStream((DWORD*)m_pIndex); |
2191 | |
2192 | DWORD dwRid = 0; |
2193 | DWORD dwLastValue = 0; |
2194 | LookupMapBase *pMap = m_pMap; |
2195 | |
2196 | // Walk each node in the map. |
2197 | while (pMap) |
2198 | { |
2199 | // Walk each entry in this node. |
2200 | for (DWORD i = 0; i < pMap->dwCount; i++) |
2201 | { |
2202 | DWORD dwCurrentValue = ComputeElementRVA(pMap, i); |
2203 | |
2204 | // Calculate the delta from the last entry. We split the delta into two-components: a bool |
2205 | // indicating whether the RVA was higher or lower and an absolute (non-negative) size. Sort of |
2206 | // like a ones-complement signed number. |
2207 | bool fIncreasingDelta = dwCurrentValue > dwLastValue; |
2208 | DWORD dwDelta = fIncreasingDelta ? (dwCurrentValue - dwLastValue) : (dwLastValue - dwCurrentValue); |
2209 | |
2210 | // As a trade-off we can't store deltas with their most efficient length (because just |
2211 | // encoding the length can dominate the space requirement when we have to cope with worst-case |
2212 | // deltas). Instead we encode a relatively short index into the table of encoding lengths we |
2213 | // calculated back in phase 2. So some deltas will encode in more bits than necessary but |
2214 | // overall we'll win due to lowered prefix bit requirements. |
2215 | // Look through all the table entries and choose the first that's large enough to accomodate |
2216 | // our delta. |
2217 | DWORD dwDeltaBitLength = BitsRequired(dwDelta); |
2218 | DWORD j; |
2219 | for (j = 0; j < kLookupMapLengthEntries; j++) |
2220 | { |
2221 | if (m_rgEncodingLengths[j] >= dwDeltaBitLength) |
2222 | { |
2223 | dwDeltaBitLength = m_rgEncodingLengths[j]; |
2224 | break; |
2225 | } |
2226 | } |
2227 | _ASSERTE(j < kLookupMapLengthEntries); |
2228 | |
2229 | // Write the entry into the compressed table. |
2230 | sTableStream.Write(j, kLookupMapLengthBits); // The index for the delta length |
2231 | sTableStream.Write(fIncreasingDelta ? 1 : 0, 1); // The +/- delta indicator |
2232 | sTableStream.Write(dwDelta, dwDeltaBitLength); // The delta itself |
2233 | |
2234 | // Is this entry one that requires a corresponding index entry? |
2235 | if ((dwRid % kLookupMapIndexStride) == 0) |
2236 | { |
2237 | // Write an index entry: |
2238 | // * The current (map-relative) RVA. |
2239 | // * The position in the table bit stream of the next entry. |
2240 | sIndexStream.Write(dwCurrentValue, kBitsPerRVA); |
2241 | sIndexStream.Write(sTableStream.GetBitsWritten(), m_cBitsPerIndexEntry - kBitsPerRVA); |
2242 | } |
2243 | |
2244 | dwRid++; |
2245 | |
2246 | dwLastValue = dwCurrentValue; |
2247 | } |
2248 | |
2249 | pMap = pMap->pNext; |
2250 | } |
2251 | |
2252 | // Flush any remaining bits in the caches of the table and index stream writers. |
2253 | sTableStream.Flush(); |
2254 | sIndexStream.Flush(); |
2255 | |
2256 | // Make sure what we wrote fitted in what we allocated. |
2257 | _ASSERTE((sTableStream.GetBitsWritten() / 8) <= m_cbTable); |
2258 | _ASSERTE((sIndexStream.GetBitsWritten() / 8) <= m_cbIndex); |
2259 | |
2260 | // Also check that we didn't have more than 31 bits of excess space allocated either (we should have |
2261 | // allocated DWORD aligned lengths). |
2262 | _ASSERTE(((m_cbTable * 8) - sTableStream.GetBitsWritten()) < 32); |
2263 | _ASSERTE(((m_cbIndex * 8) - sIndexStream.GetBitsWritten()) < 32); |
2264 | } |
2265 | |
2266 | // Determine the final, map-relative RVA of the element at a specified index |
2267 | DWORD ComputeElementRVA(LookupMapBase *pMap, DWORD index) |
2268 | { |
2269 | STANDARD_VM_CONTRACT; |
2270 | |
2271 | // We base our RVAs on the RVA of the map (rather than the module). This is purely because individual |
2272 | // maps don't store back pointers to their owning module so it's easier to recover pointer values at |
2273 | // runtime using the map address instead. |
2274 | DWORD rvaBase = m_pImage->GetRVA(m_pMap); |
2275 | |
2276 | // Retrieve the pointer value in the specified entry. This is tricky since the pointer is |
2277 | // encoded as a RelativePointer. |
2278 | DWORD dwFinalRVA; |
2279 | TADDR entry = RelativePointer<TADDR>::GetValueMaybeNullAtPtr((TADDR)&pMap->pTable[index]); |
2280 | if (entry == 0) |
2281 | { |
2282 | // The pointer was null. We encode this as a zero RVA (RVA pointing to the map itself, |
2283 | // which should never happen otherwise). |
2284 | dwFinalRVA = 0; |
2285 | } |
2286 | else |
2287 | { |
2288 | // Non-null pointer, go get the RVA it's been mapped to. Transform this RVA into our |
2289 | // special map-relative variant by substracting the map base. |
2290 | |
2291 | // Some of the pointer alignment bits may have been used as flags; preserve them. |
2292 | DWORD flags = entry & ((1 << kFlagBits) - 1); |
2293 | entry -= flags; |
2294 | |
2295 | // We only support compressing maps of pointers to saved objects (e.g. no indirected FixupPointers) |
2296 | // so there is guaranteed to be a valid RVA at this point. If this does not hold, GetRVA will assert. |
2297 | DWORD rvaEntry = m_pImage->GetRVA((void*)entry); |
2298 | |
2299 | dwFinalRVA = rvaEntry - rvaBase + flags; |
2300 | } |
2301 | |
2302 | return dwFinalRVA; |
2303 | } |
2304 | |
2305 | // Determine the number of bits required to represent the significant portion of a value (i.e. the value |
2306 | // without any leading 0s). Always return 1 as a minimum (we do not encode 0 in 0 bits). |
2307 | DWORD BitsRequired(DWORD dwValue) |
2308 | { |
2309 | LIMITED_METHOD_CONTRACT; |
2310 | |
2311 | #if (defined(_TARGET_X86_) || defined(_TARGET_AMD64_)) && defined(_MSC_VER) |
2312 | |
2313 | // This this operation could impact the performance of ngen (we call this a *lot*) we'll try and |
2314 | // optimize this where we can. x86 and amd64 actually have instructions to find the least and most |
2315 | // significant bits in a DWORD and MSVC exposes this as a builtin. |
2316 | DWORD dwHighBit; |
2317 | if (_BitScanReverse(&dwHighBit, dwValue)) |
2318 | return dwHighBit + 1; |
2319 | else |
2320 | return 1; |
2321 | |
2322 | #else // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER |
2323 | |
2324 | // Otherwise we'll calculate this the slow way. Pick off the 32-bit case first due to avoid the |
2325 | // usual << problem (x << 32 == x, not 0). |
2326 | if (dwValue > 0x7fffffff) |
2327 | return 32; |
2328 | |
2329 | DWORD cBits = 1; |
2330 | while (dwValue > ((1U << cBits) - 1)) |
2331 | cBits++; |
2332 | |
2333 | return cBits; |
2334 | |
2335 | #endif // (_TARGET_X86_ || _TARGET_AMD64_) && _MSC_VER |
2336 | } |
2337 | |
2338 | // Sort the given input array (of kLookupMapLengthEntries entries, where the last entry is already sorted) |
2339 | // from lowest to highest value. |
2340 | void SortLengthBuckets(BYTE rgBuckets[]) |
2341 | { |
2342 | LIMITED_METHOD_CONTRACT; |
2343 | |
2344 | // This simplistic insertion sort algorithm is probably the fastest for small values of |
2345 | // kLookupMapLengthEntries. |
2346 | _ASSERTE(kLookupMapLengthEntries < 10); |
2347 | |
2348 | // Iterate over every entry apart from the last two, moving the correct sorted value into each in |
2349 | // turn. Don't do the last value because it's already sorted and the second last because it'll be |
2350 | // sorted by the time we've done all the rest. |
2351 | for (DWORD i = 0; i < (kLookupMapLengthEntries - 2); i++) |
2352 | { |
2353 | BYTE bLowValue = rgBuckets[i]; // The lowest value we've seen so far |
2354 | DWORD dwLowIndex = i; // The index which held that value |
2355 | |
2356 | // Look through the unsorted entries for the smallest. |
2357 | for (DWORD j = i + 1; j < (kLookupMapLengthEntries - 1); j++) |
2358 | { |
2359 | if (rgBuckets[j] < bLowValue) |
2360 | { |
2361 | // Got a bette candidate for smallest. |
2362 | bLowValue = rgBuckets[j]; |
2363 | dwLowIndex = j; |
2364 | } |
2365 | } |
2366 | |
2367 | // If the original value at the current index wasn't the smallest, swap it with the one that was. |
2368 | if (dwLowIndex != i) |
2369 | { |
2370 | rgBuckets[dwLowIndex] = rgBuckets[i]; |
2371 | rgBuckets[i] = bLowValue; |
2372 | } |
2373 | } |
2374 | |
2375 | #ifdef _DEBUG |
2376 | // Check the table really is sorted. |
2377 | for (DWORD i = 1; i < kLookupMapLengthEntries; i++) |
2378 | _ASSERTE(rgBuckets[i] >= rgBuckets[i - 1]); |
2379 | #endif // _DEBUG |
2380 | } |
2381 | |
2382 | // Given the histogram of the delta lengths and a prospective table of the subset of those lengths that |
2383 | // we'd utilize to encode the table, return the size (in bits) of the compressed table we'd get as a |
2384 | // result. The algorithm requires that the encoding length table is sorted (smallest to largest length). |
2385 | DWORD PredictCompressedSize(BYTE rgBuckets[]) |
2386 | { |
2387 | LIMITED_METHOD_CONTRACT; |
2388 | |
2389 | DWORD cTotalBits = 0; |
2390 | |
2391 | // Iterate over each entry in the histogram (first entry is the number of deltas that can be encoded |
2392 | // in 1 bit, the second is the number of entries encodable in 2 bits etc.). |
2393 | for (DWORD i = 0; i < kBitsPerRVA; i++) |
2394 | { |
2395 | // Start by assuming that we can encode entries in this bucket with their exact length. |
2396 | DWORD cBits = i + 1; |
2397 | |
2398 | // Look through the encoding table to find the first (lowest) encoding length that can encode the |
2399 | // values for this bucket. |
2400 | for (DWORD j = 0; j < kLookupMapLengthEntries; j++) |
2401 | { |
2402 | if (cBits <= rgBuckets[j]) |
2403 | { |
2404 | // This is the best encoding we can do. Remember the real cost of all entries in this |
2405 | // histogram bucket. |
2406 | cBits = rgBuckets[j]; |
2407 | break; |
2408 | } |
2409 | } |
2410 | |
2411 | // Each entry for this histogram bucket costs a fixed size index into the encoding length table |
2412 | // (kLookupMapLengthBits), a single bit of delta sign plus the number of bits of delta magnitude |
2413 | // that we calculated above. |
2414 | cTotalBits += (kLookupMapLengthBits + 1 + cBits) * m_rgHistogram[i]; |
2415 | } |
2416 | |
2417 | return cTotalBits; |
2418 | } |
2419 | }; |
2420 | |
2421 | // Allocate a special zap node that will compress the cold rid map associated with the given LookupMap. |
2422 | void DataImage::StoreCompressedLayoutMap(LookupMapBase *pMap, ItemKind kind) |
2423 | { |
2424 | STANDARD_VM_CONTRACT; |
2425 | |
2426 | ZapNode *pNode = new (GetHeap()) ZapCompressedLookupMap(this, pMap, static_cast<BYTE>(kind)); |
2427 | |
2428 | AddStructureInOrder(pNode); |
2429 | } |
2430 | |
2431 | #endif // FEATURE_PREJIT |
2432 | |