// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// ZapRelocs.cpp
//

//
// Zapping of relocations
//
// ======================================================================================

#include "common.h"

#include "zaprelocs.h"

#ifdef REDHAWK
void PDB_NoticeReloc(ZapRelocationType type, DWORD rvaReloc, ZapNode * pTarget, int targetOffset);
#endif

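// Writes the relocation of the given type at (pSrc + offset), which corresponds
// to the image's current RVA plus offset. The encoded value points at pTarget
// plus targetOffset. Position-dependent relocation types also record an
// IMAGE_BASE_RELOCATION entry so the OS loader can fix them up if the image is
// rebased; position-independent types return early and record no entry.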
void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int targetOffset, ZapRelocationType type)
{
    _ASSERTE(pTarget != NULL);

    PBYTE pLocation = (PBYTE)pSrc + offset;
    DWORD rva = m_pImage->GetCurrentRVA() + offset;
    TADDR pActualTarget = (TADDR)m_pImage->GetBaseAddress() + pTarget->GetRVA() + targetOffset;

#ifdef REDHAWK
    PDB_NoticeReloc(type, rva, pTarget, targetOffset);
#endif

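    // Cases that end in 'return' fully resolve the fixup here and need no base
    // reloc entry; cases that end in 'break' write an absolute value and fall
    // through to record a base reloc entry below.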
    switch (type)
    {
    case IMAGE_REL_BASED_ABSOLUTE:
        *(UNALIGNED DWORD *)pLocation = pTarget->GetRVA() + targetOffset;
        // IMAGE_REL_BASED_ABSOLUTE does not need base reloc entry
        return;

    case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
        _ASSERTE(targetOffset == 0);
        *(UNALIGNED DWORD *)pLocation = (DWORD)CORCOMPILE_TAG_TOKEN(pTarget->GetRVA());
        // IMAGE_REL_BASED_ABSOLUTE_TAGGED does not need base reloc entry
        return;

    case IMAGE_REL_BASED_PTR:
#ifdef _TARGET_ARM_
        // Misaligned relocs disable ASLR on ARM. We should never ever emit them.
        _ASSERTE(IS_ALIGNED(rva, TARGET_POINTER_SIZE));
#endif
        *(UNALIGNED TARGET_POINTER_TYPE *)pLocation = (TARGET_POINTER_TYPE)pActualTarget;
        break;

    case IMAGE_REL_BASED_RELPTR:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
            *(UNALIGNED TADDR *)pLocation = (TADDR)(pActualTarget - pSite);
        }
        // IMAGE_REL_BASED_RELPTR does not need base reloc entry
        return;

    case IMAGE_REL_BASED_RELPTR32:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
            *(UNALIGNED INT32 *)pLocation = (INT32)(pActualTarget - pSite);
        }
        // IMAGE_REL_BASED_RELPTR32 does not need base reloc entry
        return;

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
    case IMAGE_REL_BASED_REL32:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
            *(UNALIGNED INT32 *)pLocation = (INT32)(pActualTarget - (pSite + sizeof(INT32)));
        }
        // IMAGE_REL_BASED_REL32 does not need base reloc entry
        return;
#endif // _TARGET_X86_ || _TARGET_AMD64_

#if defined(_TARGET_ARM_)
    case IMAGE_REL_BASED_THUMB_MOV32:
        {
            PutThumb2Mov32((UINT16 *)pLocation, (UINT32)pActualTarget);
            break;
        }

    case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;

            // For details about how the value is calculated, see
            // description of IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL
            const UINT32 offsetCorrection = 12;

            UINT32 imm32 = UINT32(pActualTarget - (pSite + offsetCorrection));

            PutThumb2Mov32((UINT16 *)pLocation, imm32);

            // IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL does not need base reloc entry
            return;
        }

    case IMAGE_REL_BASED_THUMB_BRANCH24:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;

            // Kind of a workaround: make this reloc work both for calls (which have the thumb bit set),
            // and for relative jumps used for hot/cold splitting (which don't).
            pActualTarget &= ~THUMB_CODE;

            // Calculate the relative offset without the thumb bit set so that it can be correctly encoded.
            _ASSERTE(!(pActualTarget & THUMB_CODE)); // we expect pActualTarget not to have the thumb bit set
            _ASSERTE(!(pSite & THUMB_CODE));         // we expect pSite not to have the thumb bit set
            INT32 relOffset = (INT32)(pActualTarget - (pSite + sizeof(INT32)));
            if (!FitsInThumb2BlRel24(relOffset))
            {
                // Retry the compilation with IMAGE_REL_BASED_THUMB_BRANCH24 relocations disabled
                // (See code:ZapInfo::getRelocTypeHint)
                ThrowHR(COR_E_OVERFLOW);
            }
            PutThumb2BlRel24((UINT16 *)pLocation, relOffset);
        }
        // IMAGE_REL_BASED_THUMB_BRANCH24 does not need base reloc entry
        return;
#endif // defined(_TARGET_ARM_)
#if defined(_TARGET_ARM64_)
    case IMAGE_REL_ARM64_BRANCH26:
        {
            TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;

            INT32 relOffset = (INT32)(pActualTarget - pSite);
            if (!FitsInRel28(relOffset))
            {
                ThrowHR(COR_E_OVERFLOW);
            }
            PutArm64Rel28((UINT32 *)pLocation, relOffset);
        }
        return;

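    // ADRP: encode the delta between the 4KB page containing the site and the
    // 4KB page containing the target as a signed 21-bit page count.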
    case IMAGE_REL_ARM64_PAGEBASE_REL21:
        {
            TADDR pSitePage = ((TADDR)m_pImage->GetBaseAddress() + rva) & 0xFFFFFFFFFFFFF000LL;
            TADDR pActualTargetPage = pActualTarget & 0xFFFFFFFFFFFFF000LL;

            INT64 relPage = (INT64)(pActualTargetPage - pSitePage);
            INT32 imm21 = (INT32)(relPage >> 12) & 0x1FFFFF;
            PutArm64Rel21((UINT32 *)pLocation, imm21);
        }
        return;

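    // ADD (immediate): encode the low 12 bits of the target address, i.e. the
    // offset of the target within its 4KB page.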
    case IMAGE_REL_ARM64_PAGEOFFSET_12A:
        {
            INT32 imm12 = (INT32)(pActualTarget & 0xFFFLL);
            PutArm64Rel12((UINT32 *)pLocation, imm12);
        }
        return;
#endif

    default:
        _ASSERTE(!"Unknown relocation type");
        break;
    }

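    // Serialize the base relocation in IMAGE_BASE_RELOCATION format: entries are
    // grouped into one block per RELOCATION_PAGE_SIZE page, and each USHORT entry
    // holds the offset within the page in its low 12 bits and the relocation type
    // in its high 4 bits.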
    DWORD page = AlignDown(rva, RELOCATION_PAGE_SIZE);

    if (page != m_page)
    {
        FlushWriter();

        m_page = page;
        m_pageIndex = m_SerializedRelocs.GetCount();

        // Reserve space for IMAGE_BASE_RELOCATION
        for (size_t iSpace = 0; iSpace < sizeof(IMAGE_BASE_RELOCATION) / sizeof(USHORT); iSpace++)
            m_SerializedRelocs.Append(0);
    }

    m_SerializedRelocs.Append((USHORT)(AlignmentTrim(rva, RELOCATION_PAGE_SIZE) | (type << 12)));
}

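// Completes the pending base reloc block: pads the entry list to a 4-byte
// boundary and fills in the IMAGE_BASE_RELOCATION header (page RVA and block
// size in bytes) whose space was reserved when the block was started.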
void ZapBaseRelocs::FlushWriter()
{
    if (m_page != 0)
    {
        // The block has to be 4-byte aligned
        if (m_SerializedRelocs.GetCount() & 1)
            m_SerializedRelocs.Append(0);

        IMAGE_BASE_RELOCATION * pBaseRelocation = (IMAGE_BASE_RELOCATION *)&(m_SerializedRelocs[m_pageIndex]);
        pBaseRelocation->VirtualAddress = m_page;
        pBaseRelocation->SizeOfBlock = (m_SerializedRelocs.GetCount() - m_pageIndex) * sizeof(USHORT);

        m_page = 0;
    }
}

void ZapBaseRelocs::Save(ZapWriter * pZapWriter)
{
    FlushWriter();

    pZapWriter->SetWritingRelocs();

    // Write the relocs as a blob
    pZapWriter->Write(&m_SerializedRelocs[0], m_SerializedRelocs.GetCount() * sizeof(USHORT));
}

//////////////////////////////////////////////////////////////////////////////
//
// ZapBlobWithRelocs
//

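// qsort comparator: orders ZapReloc entries by ascending offset within the blob.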
int _cdecl CmpZapRelocs(const void *p1, const void *p2)
{
    LIMITED_METHOD_CONTRACT;

    const ZapReloc *relocTemp1 = (const ZapReloc *)p1;
    const ZapReloc *relocTemp2 = (const ZapReloc *)p2;
    if (relocTemp1->m_offset < relocTemp2->m_offset)
        return -1;
    else if (relocTemp1->m_offset > relocTemp2->m_offset)
        return 1;
    else
        return 0;
}

void ZapBlobWithRelocs::Save(ZapWriter * pZapWriter)
{
    if (m_pRelocs != NULL)
    {
        // Pre-pass to figure out whether we need to sort.
        // If the offsets are not in ascending order and the offsets within this
        // array end up describing locations in different pages, the relocation
        // writer generates bad relocation info (e.g. multiple entries for the same
        // page) that is no longer accepted by the OS loader.
        // Also, having relocs in ascending order allows a more compact representation.

        ZapReloc *pReloc = m_pRelocs;

        // sorting is only relevant if there is more than one reloc entry
        if (pReloc->m_type != IMAGE_REL_INVALID && pReloc[1].m_type != IMAGE_REL_INVALID)
        {
            bool isSorted = true;
            DWORD lastOffset = pReloc->m_offset;
            DWORD cReloc = 1;

            // we start with the second entry (the first entry is already consumed)
            while (pReloc[cReloc].m_type != IMAGE_REL_INVALID)
            {
                // we cannot abort the loop here because we need to count the
                // entries to properly sort the relocs
                if (pReloc[cReloc].m_offset < lastOffset)
                    isSorted = false;
                lastOffset = pReloc[cReloc].m_offset;
                cReloc++;
            }
            if (!isSorted)
            {
                qsort(pReloc, cReloc, sizeof(ZapReloc), CmpZapRelocs);
            }
        }

        ZapImage * pImage = ZapImage::GetImage(pZapWriter);
        PBYTE pData = GetData();

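        // Each reloc site currently holds its addend (the target offset) inline,
        // encoded per relocation type. Decode it here and let the image writer
        // re-encode the final value against the target node's actual RVA.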
        for (pReloc = m_pRelocs; pReloc->m_type != IMAGE_REL_INVALID; pReloc++)
        {
            PBYTE pLocation = pData + pReloc->m_offset;
            int targetOffset = 0;

            // Decode the offset
            switch (pReloc->m_type)
            {
            case IMAGE_REL_BASED_ABSOLUTE:
                targetOffset = *(UNALIGNED DWORD *)pLocation;
                break;

            case IMAGE_REL_BASED_ABSOLUTE_TAGGED:
                targetOffset = 0;
                break;

            case IMAGE_REL_BASED_PTR:
                targetOffset = (int)*(UNALIGNED TADDR *)pLocation;
                break;

            case IMAGE_REL_BASED_RELPTR:
                targetOffset = (int)*(UNALIGNED TADDR *)pLocation;
                break;

            case IMAGE_REL_BASED_RELPTR32:
                targetOffset = (int)*(UNALIGNED INT32 *)pLocation;
                break;

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
            case IMAGE_REL_BASED_REL32:
                targetOffset = *(UNALIGNED INT32 *)pLocation;
                break;
#endif // _TARGET_X86_ || _TARGET_AMD64_

#if defined(_TARGET_ARM_)
            case IMAGE_REL_BASED_THUMB_MOV32:
            case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
                targetOffset = (int)GetThumb2Mov32((UINT16 *)pLocation);
                break;

            case IMAGE_REL_BASED_THUMB_BRANCH24:
                targetOffset = GetThumb2BlRel24((UINT16 *)pLocation);
                break;
#endif // defined(_TARGET_ARM_)

#if defined(_TARGET_ARM64_)
            case IMAGE_REL_ARM64_BRANCH26:
                targetOffset = (int)GetArm64Rel28((UINT32 *)pLocation);
                break;

            case IMAGE_REL_ARM64_PAGEBASE_REL21:
                targetOffset = (int)GetArm64Rel21((UINT32 *)pLocation);
                break;

            case IMAGE_REL_ARM64_PAGEOFFSET_12A:
                targetOffset = (int)GetArm64Rel12((UINT32 *)pLocation);
                break;
#endif // defined(_TARGET_ARM64_)

            default:
                _ASSERTE(!"Unknown reloc type");
                break;
            }

            pImage->WriteReloc(pData, pReloc->m_offset,
                pReloc->m_pTargetNode, targetOffset, pReloc->m_type);
        }
    }

    ZapBlob::Save(pZapWriter);
}

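// Returns the number of pointer-sized (IMAGE_REL_BASED_PTR) relocations in this
// blob that would straddle a RELOCATION_PAGE_SIZE boundary if the blob were
// placed at position dwPos in the image.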
COUNT_T ZapBlobWithRelocs::GetCountOfStraddlerRelocations(DWORD dwPos)
{
    if (m_pRelocs == NULL)
        return 0;

    // Straddlers can exist only if the node crosses a page boundary
    if (AlignDown(dwPos, RELOCATION_PAGE_SIZE) == AlignDown(dwPos + GetSize() - 1, RELOCATION_PAGE_SIZE))
        return 0;

    COUNT_T nStraddlers = 0;

    for (ZapReloc * pReloc = m_pRelocs; pReloc->m_type != IMAGE_REL_INVALID; pReloc++)
    {
        if (pReloc->m_type == IMAGE_REL_BASED_PTR)
        {
            if (AlignmentTrim(dwPos + pReloc->m_offset, RELOCATION_PAGE_SIZE) > RELOCATION_PAGE_SIZE - TARGET_POINTER_SIZE)
                nStraddlers++;
        }
    }

    return nStraddlers;
}

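// Allocates a new blob on the writer's heap; the payload is stored inline,
// immediately after the ZapBlobWithRelocs object itself.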
ZapBlobWithRelocs * ZapBlobWithRelocs::NewBlob(ZapWriter * pWriter, PVOID pData, SIZE_T cbSize)
{
    S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapBlobWithRelocs)) + S_SIZE_T(cbSize);
    if (cbAllocSize.IsOverflow())
        ThrowHR(COR_E_OVERFLOW);

    void * pMemory = new (pWriter->GetHeap()) BYTE[cbAllocSize.Value()];

    ZapBlobWithRelocs * pZapBlobWithRelocs = new (pMemory) ZapBlobWithRelocs(cbSize);

    if (pData != NULL)
        memcpy((void *)(pZapBlobWithRelocs + 1), pData, cbSize);

    return pZapBlobWithRelocs;
}

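// Variant of ZapBlobWithRelocs whose alignment is fixed at compile time via the
// template parameter; used by NewAlignedBlob below for the supported alignments.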
template <DWORD alignment>
class ZapAlignedBlobWithRelocsConst : public ZapBlobWithRelocs
{
protected:
    ZapAlignedBlobWithRelocsConst(SIZE_T cbSize)
        : ZapBlobWithRelocs(cbSize)
    {
    }

public:
    virtual UINT GetAlignment()
    {
        return alignment;
    }

    static ZapBlobWithRelocs * NewBlob(ZapWriter * pWriter, PVOID pData, SIZE_T cbSize)
    {
        S_SIZE_T cbAllocSize = S_SIZE_T(sizeof(ZapAlignedBlobWithRelocsConst<alignment>)) + S_SIZE_T(cbSize);
        if (cbAllocSize.IsOverflow())
            ThrowHR(COR_E_OVERFLOW);

        void * pMemory = new (pWriter->GetHeap()) BYTE[cbAllocSize.Value()];

        ZapAlignedBlobWithRelocsConst<alignment> * pZapBlob = new (pMemory) ZapAlignedBlobWithRelocsConst<alignment>(cbSize);

        if (pData != NULL)
            memcpy((void *)(pZapBlob + 1), pData, cbSize);

        return pZapBlob;
    }
};

ZapBlobWithRelocs * ZapBlobWithRelocs::NewAlignedBlob(ZapWriter * pWriter, PVOID pData, SIZE_T cbSize, SIZE_T cbAlignment)
{
    switch (cbAlignment)
    {
    case 1:
        return ZapBlobWithRelocs::NewBlob(pWriter, pData, cbSize);
    case 2:
        return ZapAlignedBlobWithRelocsConst<2>::NewBlob(pWriter, pData, cbSize);
    case 4:
        return ZapAlignedBlobWithRelocsConst<4>::NewBlob(pWriter, pData, cbSize);
    case 8:
        return ZapAlignedBlobWithRelocsConst<8>::NewBlob(pWriter, pData, cbSize);
    case 16:
        return ZapAlignedBlobWithRelocsConst<16>::NewBlob(pWriter, pData, cbSize);

    default:
        _ASSERTE(!"Requested alignment not supported");
        return NULL;
    }
}