#ifdef TARGET_DEFS_ONLY

#define EM_TCC_TARGET EM_X86_64

/* relocation type for 32 bit data relocation */
#define R_DATA_32 R_X86_64_32S
#define R_DATA_PTR R_X86_64_64
#define R_JMP_SLOT R_X86_64_JUMP_SLOT
#define R_GLOB_DAT R_X86_64_GLOB_DAT
#define R_COPY R_X86_64_COPY
#define R_RELATIVE R_X86_64_RELATIVE

#define R_NUM R_X86_64_NUM

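/* default load address of executables and the maximum page size used
   for ELF segment alignment on x86-64 (2 MiB) */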
#define ELF_START_ADDR 0x400000
#define ELF_PAGE_SIZE 0x200000

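/* GOT/PLT handling flags for this target; they are consumed by the
   generic ELF linker code in tccelf.c. */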
#define PCRELATIVE_DLLPLT 1
#define RELOCATE_DLLPLT 1

#else /* !TARGET_DEFS_ONLY */

#include "tcc.h"

#if !defined(ELF_OBJ_ONLY) || defined(TCC_TARGET_MACHO)
/* Returns 1 for a code relocation, 0 for a data relocation. For unknown
   relocations, returns -1. */
int code_reloc (int reloc_type)
{
    switch (reloc_type) {
        case R_X86_64_32:
        case R_X86_64_32S:
        case R_X86_64_64:
        case R_X86_64_GOTPC32:
        case R_X86_64_GOTPC64:
        case R_X86_64_GOTPCREL:
        case R_X86_64_GOTPCRELX:
        case R_X86_64_REX_GOTPCRELX:
        case R_X86_64_GOTTPOFF:
        case R_X86_64_GOT32:
        case R_X86_64_GOT64:
        case R_X86_64_GLOB_DAT:
        case R_X86_64_COPY:
        case R_X86_64_RELATIVE:
        case R_X86_64_GOTOFF64:
        case R_X86_64_TLSGD:
        case R_X86_64_TLSLD:
        case R_X86_64_DTPOFF32:
        case R_X86_64_TPOFF32:
            return 0;

        case R_X86_64_PC32:
        case R_X86_64_PC64:
        case R_X86_64_PLT32:
        case R_X86_64_PLTOFF64:
        case R_X86_64_JUMP_SLOT:
            return 1;
    }
    return -1;
}

/* Returns an enumerator to describe whether and when the relocation needs a
   GOT and/or PLT entry to be created. See tcc.h for a description of the
   different values. */
int gotplt_entry_type (int reloc_type)
{
    switch (reloc_type) {
        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
        case R_X86_64_COPY:
        case R_X86_64_RELATIVE:
            return NO_GOTPLT_ENTRY;

        /* The following relocs wouldn't normally need GOT or PLT
           slots, but we need them for simplicity in the link
           editor part. See our caller for comments. */
        case R_X86_64_32:
        case R_X86_64_32S:
        case R_X86_64_64:
        case R_X86_64_PC32:
        case R_X86_64_PC64:
            return AUTO_GOTPLT_ENTRY;

        case R_X86_64_GOTTPOFF:
            return BUILD_GOT_ONLY;

        case R_X86_64_GOT32:
        case R_X86_64_GOT64:
        case R_X86_64_GOTPC32:
        case R_X86_64_GOTPC64:
        case R_X86_64_GOTOFF64:
        case R_X86_64_GOTPCREL:
        case R_X86_64_GOTPCRELX:
        case R_X86_64_TLSGD:
        case R_X86_64_TLSLD:
        case R_X86_64_DTPOFF32:
        case R_X86_64_TPOFF32:
        case R_X86_64_REX_GOTPCRELX:
        case R_X86_64_PLT32:
        case R_X86_64_PLTOFF64:
            return ALWAYS_GOTPLT_ENTRY;
    }

    return -1;
}

#if !defined(TCC_TARGET_MACHO) || defined TCC_IS_NATIVE
ST_FUNC unsigned create_plt_entry(TCCState *s1, unsigned got_offset, struct sym_attr *attr)
{
    Section *plt = s1->plt;
    uint8_t *p;
    int modrm;
    unsigned plt_offset, relofs;

    modrm = 0x25;
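    /* ModRM byte 0x25: mod=00, r/m=101, i.e. a disp32 memory operand
       (RIP-relative in 64-bit mode).  The reg field selects the 0xFF-group
       opcode: 0x25 is /4 (jmp), 0x25 + 0x10 = 0x35 is /6 (push). */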

    /* empty PLT: create PLT0 entry that pushes the library identifier
       (GOT + PTR_SIZE) and jumps to ld.so resolution routine
       (GOT + 2 * PTR_SIZE) */
    if (plt->data_offset == 0) {
        p = section_ptr_add(plt, 16);
        p[0] = 0xff; /* push *(got + PTR_SIZE) */
        p[1] = modrm + 0x10;
        write32le(p + 2, PTR_SIZE);
        p[6] = 0xff; /* jmp *(got + PTR_SIZE * 2) */
        p[7] = modrm;
        write32le(p + 8, PTR_SIZE * 2);
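        /* PLT0 uses 12 bytes of code; the 16-byte allocation keeps all
           PLT entries 16-byte aligned. */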
    }
    plt_offset = plt->data_offset;

    /* The PLT slot refers to the relocation entry it needs via offset.
       The reloc entry is created below, so its offset is the current
       data_offset */
    relofs = s1->got->reloc ? s1->got->reloc->data_offset : 0;

    /* Jump to GOT entry where ld.so initially put the address of ip + 4 */
    p = section_ptr_add(plt, 16);
    p[0] = 0xff; /* jmp *(got + x) */
    p[1] = modrm;
    write32le(p + 2, got_offset);
    p[6] = 0x68; /* push $xxx */
    /* On x86-64, the relocation is referred to by _index_ */
    write32le(p + 7, relofs / sizeof (ElfW_Rel));
    p[11] = 0xe9; /* jmp plt_start */
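    /* plt->data_offset is now the end of this entry, so the negated value
       is the rel32 displacement back to PLT0 at the start of the section. */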
    write32le(p + 12, -(plt->data_offset));
    return plt_offset;
}

/* relocate the PLT: compute addresses and offsets in the PLT now that final
   address for PLT and GOT are known (see fill_program_header) */
ST_FUNC void relocate_plt(TCCState *s1)
{
    uint8_t *p, *p_end;

    if (!s1->plt)
        return;

    p = s1->plt->data;
    p_end = p + s1->plt->data_offset;

    if (p < p_end) {
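        /* First 16 bytes are PLT0: patch its two RIP-relative displacements.
           Each is relative to the end of its instruction (offsets 6 and 12),
           so the second one needs an extra -6. */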
        int x = s1->got->sh_addr - s1->plt->sh_addr - 6;
        add32le(p + 2, x);
        add32le(p + 8, x - 6);
        p += 16;
        while (p < p_end) {
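            /* Each later entry starts with "jmp *disp(%rip)" whose
               displacement ends 6 bytes in; (s1->plt->data - p) rebases x,
               computed for the start of the PLT, onto this entry. */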
            add32le(p + 2, x + (s1->plt->data - p));
            p += 16;
        }
    }
}
#endif
#endif

void relocate(TCCState *s1, ElfW_Rel *rel, int type, unsigned char *ptr, addr_t addr, addr_t val)
{
    int sym_index, esym_index;

    sym_index = ELFW(R_SYM)(rel->r_info);
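    /* qrel (declared in tccelf.c) points at the next dynamic relocation
       entry to be emitted when run-time relocations are needed. */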

    switch (type) {
        case R_X86_64_64:
            if (s1->output_type == TCC_OUTPUT_DLL) {
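                /* For a DLL, also emit a run-time relocation: against the
                   dynamic symbol if it has one, otherwise a RELATIVE reloc
                   resolved against the load base. */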
                esym_index = get_sym_attr(s1, sym_index, 0)->dyn_index;
                qrel->r_offset = rel->r_offset;
                if (esym_index) {
                    qrel->r_info = ELFW(R_INFO)(esym_index, R_X86_64_64);
                    qrel->r_addend = rel->r_addend;
                    qrel++;
                    break;
                } else {
                    qrel->r_info = ELFW(R_INFO)(0, R_X86_64_RELATIVE);
                    qrel->r_addend = read64le(ptr) + val;
                    qrel++;
                }
            }
            add64le(ptr, val);
            break;
        case R_X86_64_32:
        case R_X86_64_32S:
            if (s1->output_type == TCC_OUTPUT_DLL) {
                /* XXX: this logic may depend on TCC's codegen
                   now TCC uses R_X86_64_32 even for a 64bit pointer */
                qrel->r_offset = rel->r_offset;
                qrel->r_info = ELFW(R_INFO)(0, R_X86_64_RELATIVE);
                /* Use sign extension! */
                qrel->r_addend = (int)read32le(ptr) + val;
                qrel++;
            }
            add32le(ptr, val);
            break;

        case R_X86_64_PC32:
            if (s1->output_type == TCC_OUTPUT_DLL) {
                /* DLL relocation */
                esym_index = get_sym_attr(s1, sym_index, 0)->dyn_index;
                if (esym_index) {
                    qrel->r_offset = rel->r_offset;
                    qrel->r_info = ELFW(R_INFO)(esym_index, R_X86_64_PC32);
                    /* Use sign extension! */
                    qrel->r_addend = (int)read32le(ptr) + rel->r_addend;
                    qrel++;
                    break;
                }
            }
            goto plt32pc32;

        case R_X86_64_PLT32:
            /* fallthrough: val already holds the PLT slot address */

        plt32pc32:
        {
            long long diff;
            diff = (long long)val - addr;
            if (diff < -2147483648LL || diff > 2147483647LL) {
                tcc_error("internal error: relocation failed");
            }
            add32le(ptr, diff);
        }
            break;

        case R_X86_64_PLTOFF64:
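            /* R_X86_64_PLTOFF64: 64-bit offset of the symbol's PLT entry
               from the GOT base */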
            add64le(ptr, val - s1->got->sh_addr + rel->r_addend);
            break;

        case R_X86_64_PC64:
            if (s1->output_type == TCC_OUTPUT_DLL) {
                /* DLL relocation */
                esym_index = get_sym_attr(s1, sym_index, 0)->dyn_index;
                if (esym_index) {
                    qrel->r_offset = rel->r_offset;
                    qrel->r_info = ELFW(R_INFO)(esym_index, R_X86_64_PC64);
                    qrel->r_addend = read64le(ptr) + rel->r_addend;
                    qrel++;
                    break;
                }
            }
            add64le(ptr, val - addr);
            break;

        case R_X86_64_GLOB_DAT:
        case R_X86_64_JUMP_SLOT:
            /* They don't need addend */
            write64le(ptr, val - rel->r_addend);
            break;
        case R_X86_64_GOTPCREL:
        case R_X86_64_GOTPCRELX:
        case R_X86_64_REX_GOTPCRELX:
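            /* addr is the address of the 32-bit field being patched; the
               displacement is relative to the end of that field, hence -4. */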
            add32le(ptr, s1->got->sh_addr - addr +
                         get_sym_attr(s1, sym_index, 0)->got_offset - 4);
            break;
        case R_X86_64_GOTPC32:
            add32le(ptr, s1->got->sh_addr - addr + rel->r_addend);
            break;
        case R_X86_64_GOTPC64:
            add64le(ptr, s1->got->sh_addr - addr + rel->r_addend);
            break;
        case R_X86_64_GOTTPOFF:
            add32le(ptr, val - s1->got->sh_addr);
            break;
        case R_X86_64_GOT32:
            /* we load the got offset */
            add32le(ptr, get_sym_attr(s1, sym_index, 0)->got_offset);
            break;
        case R_X86_64_GOT64:
            /* we load the got offset */
            add64le(ptr, get_sym_attr(s1, sym_index, 0)->got_offset);
            break;
        case R_X86_64_GOTOFF64:
            add64le(ptr, val - s1->got->sh_addr);
            break;
        case R_X86_64_TLSGD:
        {
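            /* GD -> LE relaxation: the __tls_get_addr call sequence is
               rewritten into a direct %fs-relative address computation. */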
            static const unsigned char expect[] = {
                /* .byte 0x66; lea 0(%rip),%rdi */
                0x66, 0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00,
                /* .word 0x6666; rex64; call __tls_get_addr@PLT */
                0x66, 0x66, 0x48, 0xe8, 0x00, 0x00, 0x00, 0x00 };
            static const unsigned char replace[] = {
                /* mov %fs:0,%rax */
                0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00,
                /* lea -4(%rax),%rax */
                0x48, 0x8d, 0x80, 0x00, 0x00, 0x00, 0x00 };

            if (memcmp (ptr-4, expect, sizeof(expect)) == 0) {
                ElfW(Sym) *sym;
                Section *sec;
                int32_t x;

                memcpy(ptr-4, replace, sizeof(replace));
                rel[1].r_info = ELFW(R_INFO)(0, R_X86_64_NONE);
                sym = &((ElfW(Sym) *)symtab_section->data)[sym_index];
                sec = s1->sections[sym->st_shndx];
                x = sym->st_value - sec->sh_addr - sec->data_offset;
                add32le(ptr + 8, x);
            } else
                tcc_error("unexpected R_X86_64_TLSGD pattern");
        }
            break;
        case R_X86_64_TLSLD:
        {
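            /* LD -> LE relaxation: the __tls_get_addr call is replaced by a
               padded load of the thread pointer (%fs:0). */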
            static const unsigned char expect[] = {
                /* lea 0(%rip),%rdi */
                0x48, 0x8d, 0x3d, 0x00, 0x00, 0x00, 0x00,
                /* call __tls_get_addr@PLT */
                0xe8, 0x00, 0x00, 0x00, 0x00 };
            static const unsigned char replace[] = {
                /* data16 data16 data16 mov %fs:0,%rax */
                0x66, 0x66, 0x66, 0x64, 0x48, 0x8b, 0x04, 0x25,
                0x00, 0x00, 0x00, 0x00 };

            if (memcmp (ptr-3, expect, sizeof(expect)) == 0) {
                memcpy(ptr-3, replace, sizeof(replace));
                rel[1].r_info = ELFW(R_INFO)(0, R_X86_64_NONE);
            } else
                tcc_error("unexpected R_X86_64_TLSLD pattern");
        }
            break;
        case R_X86_64_DTPOFF32:
        case R_X86_64_TPOFF32:
        {
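            /* Offset of the symbol from the end of its section; with the
               TLS relaxations above this serves as the %fs-relative offset. */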
            ElfW(Sym) *sym;
            Section *sec;
            int32_t x;

            sym = &((ElfW(Sym) *)symtab_section->data)[sym_index];
            sec = s1->sections[sym->st_shndx];
            x = val - sec->sh_addr - sec->data_offset;
            add32le(ptr, x);
        }
            break;
        case R_X86_64_NONE:
            break;
        case R_X86_64_RELATIVE:
#ifdef TCC_TARGET_PE
            add32le(ptr, val - s1->pe_imagebase);
#endif
            /* do nothing */
            break;
    }
}

#endif /* !TARGET_DEFS_ONLY */