//===-------------------- UnwindRegistersRestore.S ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "assembly.h"

  .text

#if !defined(__USING_SJLJ_EXCEPTIONS__)

#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_x866jumptoEv)
#
# void libunwind::Registers_x86::jumpto()
#
#if defined(_WIN32)
# On Windows, the 'this' pointer is passed in ecx instead of on the stack
  movl   %ecx, %eax
#else
# On entry:
#  +                       +
#  +-----------------------+
#  + thread_state pointer  +
#  +-----------------------+
#  + return address        +
#  +-----------------------+   <-- SP
#  +                       +
  movl   4(%esp), %eax
#endif
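# Note: the loads below assume the saved-register layout implied by the offsets
# used in this routine (a sketch inferred from the code, not a layout
# definition): 0=eax, 4=ebx, 8=ecx, 12=edx, 16=edi, 20=esi, 24=ebp, 28=esp,
# 40=eip.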
  # set up eax and ret on new stack location
  movl   28(%eax), %edx # edx holds new stack pointer
  subl   $8, %edx
  movl   %edx, 28(%eax)
  movl   0(%eax), %ebx
  movl   %ebx, 0(%edx)
  movl   40(%eax), %ebx
  movl   %ebx, 4(%edx)
  # eax and the return address are now staged at the new stack location
  # restore all registers
  movl   4(%eax), %ebx
  movl   8(%eax), %ecx
  movl   12(%eax), %edx
  movl   16(%eax), %edi
  movl   20(%eax), %esi
  movl   24(%eax), %ebp
  movl   28(%eax), %esp
  # skip ss
  # skip eflags
  pop    %eax  # eax was already pushed on new stack
  ret          # eip was already pushed on new stack
  # skip cs
  # skip ds
  # skip es
  # skip fs
  # skip gs

#elif defined(__x86_64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind16Registers_x86_646jumptoEv)
#
# void libunwind::Registers_x86_64::jumpto()
#
#if defined(_WIN64)
# On entry, the thread_state pointer is in rcx; move it into rdi
# to share the restore code below. Since this routine restores and
# overwrites all registers, we can use the same registers for
# pointers and temporaries as on Unix, even though the Win64 ABI
# normally must not clobber some of them.
  movq   %rcx, %rdi
#else
# On entry, the thread_state pointer is in rdi
#endif
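# Note: the restore below assumes the saved-register layout implied by the
# offsets used here (a sketch, not a layout definition): rdi at offset 32,
# rsp at 56, rip at 128. Because rdi is the base pointer for every load, the
# new rdi and rip are first staged on the target stack, rsp is restored last,
# and a final pop/ret completes the transfer.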

  movq   56(%rdi), %rax # rax holds new stack pointer
  subq   $16, %rax
  movq   %rax, 56(%rdi)
  movq   32(%rdi), %rbx # store new rdi on new stack
  movq   %rbx, 0(%rax)
  movq   128(%rdi), %rbx # store new rip on new stack
  movq   %rbx, 8(%rax)
  # restore all registers
  movq   0(%rdi), %rax
  movq   8(%rdi), %rbx
  movq   16(%rdi), %rcx
  movq   24(%rdi), %rdx
  # restore rdi later
  movq   40(%rdi), %rsi
  movq   48(%rdi), %rbp
  # restore rsp later
  movq   64(%rdi), %r8
  movq   72(%rdi), %r9
  movq   80(%rdi), %r10
  movq   88(%rdi), %r11
  movq   96(%rdi), %r12
  movq   104(%rdi), %r13
  movq   112(%rdi), %r14
  movq   120(%rdi), %r15
  # skip rflags
  # skip cs
  # skip fs
  # skip gs

#if defined(_WIN64)
  movdqu 176(%rdi), %xmm0
  movdqu 192(%rdi), %xmm1
  movdqu 208(%rdi), %xmm2
  movdqu 224(%rdi), %xmm3
  movdqu 240(%rdi), %xmm4
  movdqu 256(%rdi), %xmm5
  movdqu 272(%rdi), %xmm6
  movdqu 288(%rdi), %xmm7
  movdqu 304(%rdi), %xmm8
  movdqu 320(%rdi), %xmm9
  movdqu 336(%rdi), %xmm10
  movdqu 352(%rdi), %xmm11
  movdqu 368(%rdi), %xmm12
  movdqu 384(%rdi), %xmm13
  movdqu 400(%rdi), %xmm14
  movdqu 416(%rdi), %xmm15
#endif
  movq   56(%rdi), %rsp # cut back rsp to new location
  pop    %rdi           # rdi was saved here earlier
  ret                   # rip was saved here


#elif defined(__powerpc64__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_ppc646jumptoEv)
//
// void libunwind::Registers_ppc64::jumpto()
//
// On entry:
//  thread_state pointer is in r3
//

// load register (GPR)
#define PPC64_LR(n) \
  ld    %r##n, (8 * (n + 2))(%r3)
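
// For illustration only (comment, not assembled): with the context layout the
// offset above assumes, where GPR n is stored at byte offset 8 * (n + 2) from
// the pointer in %r3, PPC64_LR(6) expands to
//   ld %r6, 64(%r3)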

  // restore integral registers
  // skip r0 for now
  // skip r1 for now
  PPC64_LR(2)
  // skip r3 for now
  // skip r4 for now
  // skip r5 for now
  PPC64_LR(6)
  PPC64_LR(7)
  PPC64_LR(8)
  PPC64_LR(9)
  PPC64_LR(10)
  PPC64_LR(11)
  PPC64_LR(12)
  PPC64_LR(13)
  PPC64_LR(14)
  PPC64_LR(15)
  PPC64_LR(16)
  PPC64_LR(17)
  PPC64_LR(18)
  PPC64_LR(19)
  PPC64_LR(20)
  PPC64_LR(21)
  PPC64_LR(22)
  PPC64_LR(23)
  PPC64_LR(24)
  PPC64_LR(25)
  PPC64_LR(26)
  PPC64_LR(27)
  PPC64_LR(28)
  PPC64_LR(29)
  PPC64_LR(30)
  PPC64_LR(31)

#ifdef PPC64_HAS_VMX

  // restore VS registers
  // (note that this also restores floating point registers and V registers,
  // because part of VS is mapped to these registers)

  addi  %r4, %r3, PPC64_OFFS_FP

// load VS register
#define PPC64_LVS(n)        \
  lxvd2x %vs##n, 0, %r4    ;\
  addi   %r4, %r4, 16
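
// For illustration only: PPC64_LVS(0) expands to
//   lxvd2x %vs0, 0, %r4
//   addi   %r4, %r4, 16
// i.e. each use loads one 16-byte VSX register and advances %r4 to the next slot.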

  // restore the first 32 VS regs (and also all floating point regs)
  PPC64_LVS(0)
  PPC64_LVS(1)
  PPC64_LVS(2)
  PPC64_LVS(3)
  PPC64_LVS(4)
  PPC64_LVS(5)
  PPC64_LVS(6)
  PPC64_LVS(7)
  PPC64_LVS(8)
  PPC64_LVS(9)
  PPC64_LVS(10)
  PPC64_LVS(11)
  PPC64_LVS(12)
  PPC64_LVS(13)
  PPC64_LVS(14)
  PPC64_LVS(15)
  PPC64_LVS(16)
  PPC64_LVS(17)
  PPC64_LVS(18)
  PPC64_LVS(19)
  PPC64_LVS(20)
  PPC64_LVS(21)
  PPC64_LVS(22)
  PPC64_LVS(23)
  PPC64_LVS(24)
  PPC64_LVS(25)
  PPC64_LVS(26)
  PPC64_LVS(27)
  PPC64_LVS(28)
  PPC64_LVS(29)
  PPC64_LVS(30)
  PPC64_LVS(31)

  // use VRSAVE to conditionally restore the remaining VS regs,
  // that are where the V regs are mapped

  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
  cmpwi %r5, 0
  beq   Lnovec

// conditionally load VS
#define PPC64_CLVS_BOTTOM(n)               \
  beq    Ldone##n                         ;\
  addi   %r4, %r3, PPC64_OFFS_FP + n * 16 ;\
  lxvd2x %vs##n, 0, %r4                   ;\
Ldone##n:

#define PPC64_CLVSl(n)           \
  andis. %r0, %r5, (1<<(47-n))  ;\
PPC64_CLVS_BOTTOM(n)

#define PPC64_CLVSh(n)           \
  andi.  %r0, %r5, (1<<(63-n))  ;\
PPC64_CLVS_BOTTOM(n)
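
// For illustration only: PPC64_CLVSl(32) expands to
//   andis. %r0, %r5, (1<<(47-32))
//   beq    Ldone32
//   addi   %r4, %r3, PPC64_OFFS_FP + 32 * 16
//   lxvd2x %vs32, 0, %r4
// Ldone32:
// so vs32 (the first V register) is only reloaded if its VRSAVE bit is set.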

  PPC64_CLVSl(32)
  PPC64_CLVSl(33)
  PPC64_CLVSl(34)
  PPC64_CLVSl(35)
  PPC64_CLVSl(36)
  PPC64_CLVSl(37)
  PPC64_CLVSl(38)
  PPC64_CLVSl(39)
  PPC64_CLVSl(40)
  PPC64_CLVSl(41)
  PPC64_CLVSl(42)
  PPC64_CLVSl(43)
  PPC64_CLVSl(44)
  PPC64_CLVSl(45)
  PPC64_CLVSl(46)
  PPC64_CLVSl(47)
  PPC64_CLVSh(48)
  PPC64_CLVSh(49)
  PPC64_CLVSh(50)
  PPC64_CLVSh(51)
  PPC64_CLVSh(52)
  PPC64_CLVSh(53)
  PPC64_CLVSh(54)
  PPC64_CLVSh(55)
  PPC64_CLVSh(56)
  PPC64_CLVSh(57)
  PPC64_CLVSh(58)
  PPC64_CLVSh(59)
  PPC64_CLVSh(60)
  PPC64_CLVSh(61)
  PPC64_CLVSh(62)
  PPC64_CLVSh(63)

#else

// load FP register
#define PPC64_LF(n) \
  lfd   %f##n, (PPC64_OFFS_FP + n * 16)(%r3)
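
// For illustration only: PPC64_LF(1) expands to
//   lfd %f1, (PPC64_OFFS_FP + 1 * 16)(%r3)
// i.e. each FP register is read from the first 8 bytes of the corresponding
// 16-byte VS slot that the PPC64_HAS_VMX path above reloads with lxvd2x.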

  // restore float registers
  PPC64_LF(0)
  PPC64_LF(1)
  PPC64_LF(2)
  PPC64_LF(3)
  PPC64_LF(4)
  PPC64_LF(5)
  PPC64_LF(6)
  PPC64_LF(7)
  PPC64_LF(8)
  PPC64_LF(9)
  PPC64_LF(10)
  PPC64_LF(11)
  PPC64_LF(12)
  PPC64_LF(13)
  PPC64_LF(14)
  PPC64_LF(15)
  PPC64_LF(16)
  PPC64_LF(17)
  PPC64_LF(18)
  PPC64_LF(19)
  PPC64_LF(20)
  PPC64_LF(21)
  PPC64_LF(22)
  PPC64_LF(23)
  PPC64_LF(24)
  PPC64_LF(25)
  PPC64_LF(26)
  PPC64_LF(27)
  PPC64_LF(28)
  PPC64_LF(29)
  PPC64_LF(30)
  PPC64_LF(31)

  // restore vector registers if any are in use
  ld    %r5, PPC64_OFFS_VRSAVE(%r3)   // test VRsave
  cmpwi %r5, 0
  beq   Lnovec

  subi  %r4, %r1, 16
  // r4 is now a 16-byte aligned pointer into the red zone
  // the _vectorScalarRegisters may not be 16-byte aligned
  // so copy via red zone temp buffer

#define PPC64_CLV_UNALIGNED_BOTTOM(n)            \
  beq    Ldone##n                               ;\
  ld     %r0, (PPC64_OFFS_V + n * 16)(%r3)      ;\
  std    %r0, 0(%r4)                            ;\
  ld     %r0, (PPC64_OFFS_V + n * 16 + 8)(%r3)  ;\
  std    %r0, 8(%r4)                            ;\
  lvx    %v##n, 0, %r4                          ;\
Ldone##n:

#define PPC64_CLV_UNALIGNEDl(n)  \
  andis. %r0, %r5, (1<<(15-n))  ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)

#define PPC64_CLV_UNALIGNEDh(n)  \
  andi.  %r0, %r5, (1<<(31-n))  ;\
PPC64_CLV_UNALIGNED_BOTTOM(n)
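
// For illustration only: PPC64_CLV_UNALIGNEDl(0) expands to
//   andis. %r0, %r5, (1<<(15-0))
//   beq    Ldone0
//   ld     %r0, (PPC64_OFFS_V + 0 * 16)(%r3)
//   std    %r0, 0(%r4)
//   ld     %r0, (PPC64_OFFS_V + 0 * 16 + 8)(%r3)
//   std    %r0, 8(%r4)
//   lvx    %v0, 0, %r4
// Ldone0:
// copying the (possibly unaligned) saved value through the aligned red-zone
// buffer at %r4 before the aligned lvx load.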

  PPC64_CLV_UNALIGNEDl(0)
  PPC64_CLV_UNALIGNEDl(1)
  PPC64_CLV_UNALIGNEDl(2)
  PPC64_CLV_UNALIGNEDl(3)
  PPC64_CLV_UNALIGNEDl(4)
  PPC64_CLV_UNALIGNEDl(5)
  PPC64_CLV_UNALIGNEDl(6)
  PPC64_CLV_UNALIGNEDl(7)
  PPC64_CLV_UNALIGNEDl(8)
  PPC64_CLV_UNALIGNEDl(9)
  PPC64_CLV_UNALIGNEDl(10)
  PPC64_CLV_UNALIGNEDl(11)
  PPC64_CLV_UNALIGNEDl(12)
  PPC64_CLV_UNALIGNEDl(13)
  PPC64_CLV_UNALIGNEDl(14)
  PPC64_CLV_UNALIGNEDl(15)
  PPC64_CLV_UNALIGNEDh(16)
  PPC64_CLV_UNALIGNEDh(17)
  PPC64_CLV_UNALIGNEDh(18)
  PPC64_CLV_UNALIGNEDh(19)
  PPC64_CLV_UNALIGNEDh(20)
  PPC64_CLV_UNALIGNEDh(21)
  PPC64_CLV_UNALIGNEDh(22)
  PPC64_CLV_UNALIGNEDh(23)
  PPC64_CLV_UNALIGNEDh(24)
  PPC64_CLV_UNALIGNEDh(25)
  PPC64_CLV_UNALIGNEDh(26)
  PPC64_CLV_UNALIGNEDh(27)
  PPC64_CLV_UNALIGNEDh(28)
  PPC64_CLV_UNALIGNEDh(29)
  PPC64_CLV_UNALIGNEDh(30)
  PPC64_CLV_UNALIGNEDh(31)

#endif

Lnovec:
  ld    %r0, PPC64_OFFS_CR(%r3)
  mtcr  %r0
  ld    %r0, PPC64_OFFS_SRR0(%r3)
  mtctr %r0

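  // Restore the remaining GPRs last. The resume PC (saved SRR0) was moved into
  // CTR above, and %r3 holds the context pointer, so it must be the very last
  // register reloaded before the bctr.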
  PPC64_LR(0)
  PPC64_LR(5)
  PPC64_LR(4)
  PPC64_LR(1)
  PPC64_LR(3)
  bctr

#elif defined(__ppc__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_ppc6jumptoEv)
;
; void libunwind::Registers_ppc::jumpto()
;
; On entry:
;  thread_state pointer is in r3
;

  ; restore integral registers
  ; skip r0 for now
  ; skip r1 for now
  lwz     r2,  16(r3)
  ; skip r3 for now
  ; skip r4 for now
  ; skip r5 for now
  lwz     r6,  32(r3)
  lwz     r7,  36(r3)
  lwz     r8,  40(r3)
  lwz     r9,  44(r3)
  lwz     r10, 48(r3)
  lwz     r11, 52(r3)
  lwz     r12, 56(r3)
  lwz     r13, 60(r3)
  lwz     r14, 64(r3)
  lwz     r15, 68(r3)
  lwz     r16, 72(r3)
  lwz     r17, 76(r3)
  lwz     r18, 80(r3)
  lwz     r19, 84(r3)
  lwz     r20, 88(r3)
  lwz     r21, 92(r3)
  lwz     r22, 96(r3)
  lwz     r23, 100(r3)
  lwz     r24, 104(r3)
  lwz     r25, 108(r3)
  lwz     r26, 112(r3)
  lwz     r27, 116(r3)
  lwz     r28, 120(r3)
  lwz     r29, 124(r3)
  lwz     r30, 128(r3)
  lwz     r31, 132(r3)

  ; restore float registers
  lfd     f0,  160(r3)
  lfd     f1,  168(r3)
  lfd     f2,  176(r3)
  lfd     f3,  184(r3)
  lfd     f4,  192(r3)
  lfd     f5,  200(r3)
  lfd     f6,  208(r3)
  lfd     f7,  216(r3)
  lfd     f8,  224(r3)
  lfd     f9,  232(r3)
  lfd     f10, 240(r3)
  lfd     f11, 248(r3)
  lfd     f12, 256(r3)
  lfd     f13, 264(r3)
  lfd     f14, 272(r3)
  lfd     f15, 280(r3)
  lfd     f16, 288(r3)
  lfd     f17, 296(r3)
  lfd     f18, 304(r3)
  lfd     f19, 312(r3)
  lfd     f20, 320(r3)
  lfd     f21, 328(r3)
  lfd     f22, 336(r3)
  lfd     f23, 344(r3)
  lfd     f24, 352(r3)
  lfd     f25, 360(r3)
  lfd     f26, 368(r3)
  lfd     f27, 376(r3)
  lfd     f28, 384(r3)
  lfd     f29, 392(r3)
  lfd     f30, 400(r3)
  lfd     f31, 408(r3)

  ; restore vector registers if any are in use
  lwz     r5, 156(r3)       ; test VRsave
  cmpwi   r5, 0
  beq     Lnovec

  subi    r4, r1, 16
  rlwinm  r4, r4, 0, 0, 27  ; clear the low 4 bits (16-byte align)
  ; r4 is now a 16-byte aligned pointer into the red zone
  ; the _vectorRegisters may not be 16-byte aligned so copy via red zone temp buffer


#define LOAD_VECTOR_UNALIGNEDl(_index)   \
  andis.  r0,r5,(1<<(15-_index))        @\
  beq     Ldone ## _index               @\
  lwz     r0, 424+_index*16(r3)         @\
  stw     r0, 0(r4)                     @\
  lwz     r0, 424+_index*16+4(r3)       @\
  stw     r0, 4(r4)                     @\
  lwz     r0, 424+_index*16+8(r3)       @\
  stw     r0, 8(r4)                     @\
  lwz     r0, 424+_index*16+12(r3)      @\
  stw     r0, 12(r4)                    @\
  lvx     v ## _index,0,r4              @\
Ldone ## _index:

#define LOAD_VECTOR_UNALIGNEDh(_index)   \
  andi.   r0,r5,(1<<(31-_index))        @\
  beq     Ldone ## _index               @\
  lwz     r0, 424+_index*16(r3)         @\
  stw     r0, 0(r4)                     @\
  lwz     r0, 424+_index*16+4(r3)       @\
  stw     r0, 4(r4)                     @\
  lwz     r0, 424+_index*16+8(r3)       @\
  stw     r0, 8(r4)                     @\
  lwz     r0, 424+_index*16+12(r3)      @\
  stw     r0, 12(r4)                    @\
  lvx     v ## _index,0,r4              @\
Ldone ## _index:

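; For illustration only: LOAD_VECTOR_UNALIGNEDl(0) expands to
;   andis.  r0,r5,(1<<(15-0))
;   beq     Ldone0
;   lwz     r0, 424(r3)
;   stw     r0, 0(r4)
;   lwz     r0, 428(r3)
;   stw     r0, 4(r4)
;   lwz     r0, 432(r3)
;   stw     r0, 8(r4)
;   lwz     r0, 436(r3)
;   stw     r0, 12(r4)
;   lvx     v0,0,r4
; Ldone0:
; copying the saved vector through the aligned red-zone buffer before the lvx.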

  LOAD_VECTOR_UNALIGNEDl(0)
  LOAD_VECTOR_UNALIGNEDl(1)
  LOAD_VECTOR_UNALIGNEDl(2)
  LOAD_VECTOR_UNALIGNEDl(3)
  LOAD_VECTOR_UNALIGNEDl(4)
  LOAD_VECTOR_UNALIGNEDl(5)
  LOAD_VECTOR_UNALIGNEDl(6)
  LOAD_VECTOR_UNALIGNEDl(7)
  LOAD_VECTOR_UNALIGNEDl(8)
  LOAD_VECTOR_UNALIGNEDl(9)
  LOAD_VECTOR_UNALIGNEDl(10)
  LOAD_VECTOR_UNALIGNEDl(11)
  LOAD_VECTOR_UNALIGNEDl(12)
  LOAD_VECTOR_UNALIGNEDl(13)
  LOAD_VECTOR_UNALIGNEDl(14)
  LOAD_VECTOR_UNALIGNEDl(15)
  LOAD_VECTOR_UNALIGNEDh(16)
  LOAD_VECTOR_UNALIGNEDh(17)
  LOAD_VECTOR_UNALIGNEDh(18)
  LOAD_VECTOR_UNALIGNEDh(19)
  LOAD_VECTOR_UNALIGNEDh(20)
  LOAD_VECTOR_UNALIGNEDh(21)
  LOAD_VECTOR_UNALIGNEDh(22)
  LOAD_VECTOR_UNALIGNEDh(23)
  LOAD_VECTOR_UNALIGNEDh(24)
  LOAD_VECTOR_UNALIGNEDh(25)
  LOAD_VECTOR_UNALIGNEDh(26)
  LOAD_VECTOR_UNALIGNEDh(27)
  LOAD_VECTOR_UNALIGNEDh(28)
  LOAD_VECTOR_UNALIGNEDh(29)
  LOAD_VECTOR_UNALIGNEDh(30)
  LOAD_VECTOR_UNALIGNEDh(31)

Lnovec:
  lwz     r0, 136(r3)   ; __cr
  mtocrf  255, r0
  lwz     r0, 148(r3)   ; __ctr
  mtctr   r0
  lwz     r0, 0(r3)     ; __ssr0
  mtctr   r0
  lwz     r0, 8(r3)     ; do r0 now
  lwz     r5, 28(r3)    ; do r5 now
  lwz     r4, 24(r3)    ; do r4 now
  lwz     r1, 12(r3)    ; do sp now
  lwz     r3, 20(r3)    ; do r3 last
  bctr

#elif defined(__arm64__) || defined(__aarch64__)

//
// void libunwind::Registers_arm64::jumpto()
//
// On entry:
//  thread_state pointer is in x0
//
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_arm646jumptoEv)
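  // x0 is the base pointer for every load below, so x0 and x1 themselves are
  // reloaded last, after sp, pc (staged into lr) and the floating-point
  // registers.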
  // skip restore of x0,x1 for now
  ldp    x2, x3,  [x0, #0x010]
  ldp    x4, x5,  [x0, #0x020]
  ldp    x6, x7,  [x0, #0x030]
  ldp    x8, x9,  [x0, #0x040]
  ldp    x10,x11, [x0, #0x050]
  ldp    x12,x13, [x0, #0x060]
  ldp    x14,x15, [x0, #0x070]
  ldp    x16,x17, [x0, #0x080]
  ldp    x18,x19, [x0, #0x090]
  ldp    x20,x21, [x0, #0x0A0]
  ldp    x22,x23, [x0, #0x0B0]
  ldp    x24,x25, [x0, #0x0C0]
  ldp    x26,x27, [x0, #0x0D0]
  ldp    x28,x29, [x0, #0x0E0]
  ldr    x30,     [x0, #0x100]  // restore pc into lr
  ldr    x1,      [x0, #0x0F8]
  mov    sp, x1                 // restore sp

  ldp    d0, d1,  [x0, #0x110]
  ldp    d2, d3,  [x0, #0x120]
  ldp    d4, d5,  [x0, #0x130]
  ldp    d6, d7,  [x0, #0x140]
  ldp    d8, d9,  [x0, #0x150]
  ldp    d10,d11, [x0, #0x160]
  ldp    d12,d13, [x0, #0x170]
  ldp    d14,d15, [x0, #0x180]
  ldp    d16,d17, [x0, #0x190]
  ldp    d18,d19, [x0, #0x1A0]
  ldp    d20,d21, [x0, #0x1B0]
  ldp    d22,d23, [x0, #0x1C0]
  ldp    d24,d25, [x0, #0x1D0]
  ldp    d26,d27, [x0, #0x1E0]
  ldp    d28,d29, [x0, #0x1F0]
  ldr    d30,     [x0, #0x200]
  ldr    d31,     [x0, #0x208]

  ldp    x0, x1,  [x0, #0x000]  // restore x0,x1
  ret    x30                    // jump to pc

#elif defined(__arm__) && !defined(__APPLE__)

#if !defined(__ARM_ARCH_ISA_ARM)
  .thumb
#endif

@
@ void libunwind::Registers_arm::restoreCoreAndJumpTo()
@
@ On entry:
@  thread_state pointer is in r0
@
  .p2align 2
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm20restoreCoreAndJumpToEv)
#if !defined(__ARM_ARCH_ISA_ARM) && __ARM_ARCH_ISA_THUMB == 1
  @ r8-r11: ldm into r1-r4, then mov to r8-r11
  adds r0, #0x20
  ldm r0!, {r1-r4}
  subs r0, #0x30
  mov r8, r1
  mov r9, r2
  mov r10, r3
  mov r11, r4
  @ r12 does not need loading, it is the intra-procedure-call scratch register
  ldr r2, [r0, #0x34]
  ldr r3, [r0, #0x3c]
  mov sp, r2
  mov lr, r3         @ restore pc into lr
  ldm r0, {r0-r7}
#else
  @ Use lr as base so that r0 can be restored.
  mov lr, r0
  @ 32bit thumb-2 restrictions for ldm:
  @ . the sp (r13) cannot be in the list
  @ . the pc (r15) and lr (r14) cannot both be in the list in an LDM instruction
  ldm lr, {r0-r12}
  ldr sp, [lr, #52]
  ldr lr, [lr, #60]  @ restore pc into lr
#endif
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMD(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMDEPv)
  @ VFP and iwMMX instructions are only available when compiling with the flags
  @ that enable them. We do not want to do that in the library (because we do not
  @ want the compiler to generate instructions that access those) but this is
  @ only accessed if the personality routine needs these registers. Use of
  @ these registers implies they are, actually, available on the target, so
  @ it's ok to execute.
  @ So, generate the instruction using the corresponding coprocessor mnemonic.
  vldmia r0, {d0-d15}
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPWithFLDMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3-d16
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreVFPWithFLDMXEPv)
  vldmia r0, {d0-d15} @ fldmiax is deprecated in ARMv7+ and now behaves like vldmia
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreVFPv3(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .fpu vfpv3
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreVFPv3EPv)
  vldmia r0, {d16-d31}
  JMP(lr)

#if defined(__ARM_WMMX)

@
@ static void libunwind::Registers_arm::restoreiWMMX(unw_fpreg_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm12restoreiWMMXEPv)
  ldcl p1, cr0, [r0], #8  @ wldrd wR0, [r0], #8
  ldcl p1, cr1, [r0], #8  @ wldrd wR1, [r0], #8
  ldcl p1, cr2, [r0], #8  @ wldrd wR2, [r0], #8
  ldcl p1, cr3, [r0], #8  @ wldrd wR3, [r0], #8
  ldcl p1, cr4, [r0], #8  @ wldrd wR4, [r0], #8
  ldcl p1, cr5, [r0], #8  @ wldrd wR5, [r0], #8
  ldcl p1, cr6, [r0], #8  @ wldrd wR6, [r0], #8
  ldcl p1, cr7, [r0], #8  @ wldrd wR7, [r0], #8
  ldcl p1, cr8, [r0], #8  @ wldrd wR8, [r0], #8
  ldcl p1, cr9, [r0], #8  @ wldrd wR9, [r0], #8
  ldcl p1, cr10, [r0], #8 @ wldrd wR10, [r0], #8
  ldcl p1, cr11, [r0], #8 @ wldrd wR11, [r0], #8
  ldcl p1, cr12, [r0], #8 @ wldrd wR12, [r0], #8
  ldcl p1, cr13, [r0], #8 @ wldrd wR13, [r0], #8
  ldcl p1, cr14, [r0], #8 @ wldrd wR14, [r0], #8
  ldcl p1, cr15, [r0], #8 @ wldrd wR15, [r0], #8
  JMP(lr)

@
@ static void libunwind::Registers_arm::restoreiWMMXControl(unw_uint32_t* values)
@
@ On entry:
@  values pointer is in r0
@
  .p2align 2
#if defined(__ELF__)
  .arch armv5te
#endif
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind13Registers_arm19restoreiWMMXControlEPj)
  ldc2 p1, cr8, [r0], #4  @ wldrw wCGR0, [r0], #4
  ldc2 p1, cr9, [r0], #4  @ wldrw wCGR1, [r0], #4
  ldc2 p1, cr10, [r0], #4 @ wldrw wCGR2, [r0], #4
  ldc2 p1, cr11, [r0], #4 @ wldrw wCGR3, [r0], #4
  JMP(lr)

#endif

#elif defined(__or1k__)

DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind14Registers_or1k6jumptoEv)
#
# void libunwind::Registers_or1k::jumpto()
#
# On entry:
#  thread_state pointer is in r3
#

  # restore integral registers
  l.lwz    r0,   0(r3)
  l.lwz    r1,   4(r3)
  l.lwz    r2,   8(r3)
  # skip r3 for now
  l.lwz    r4,  16(r3)
  l.lwz    r5,  20(r3)
  l.lwz    r6,  24(r3)
  l.lwz    r7,  28(r3)
  l.lwz    r8,  32(r3)
  # skip r9
  l.lwz    r10, 40(r3)
  l.lwz    r11, 44(r3)
  l.lwz    r12, 48(r3)
  l.lwz    r13, 52(r3)
  l.lwz    r14, 56(r3)
  l.lwz    r15, 60(r3)
  l.lwz    r16, 64(r3)
  l.lwz    r17, 68(r3)
  l.lwz    r18, 72(r3)
  l.lwz    r19, 76(r3)
  l.lwz    r20, 80(r3)
  l.lwz    r21, 84(r3)
  l.lwz    r22, 88(r3)
  l.lwz    r23, 92(r3)
  l.lwz    r24, 96(r3)
  l.lwz    r25, 100(r3)
  l.lwz    r26, 104(r3)
  l.lwz    r27, 108(r3)
  l.lwz    r28, 112(r3)
  l.lwz    r29, 116(r3)
  l.lwz    r30, 120(r3)
  l.lwz    r31, 124(r3)

  # load new pc into ra before r3 (the thread_state pointer) is clobbered
  l.lwz    r9, 128(r3)

  # at last, restore r3
  l.lwz    r3,  12(r3)

  # jump to pc
  l.jr     r9
   l.nop

#elif defined(__mips__) && defined(_ABIO32) && _MIPS_SIM == _ABIO32

//
// void libunwind::Registers_mips_o32::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind18Registers_mips_o326jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
#if __mips_fpr != 64
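  // With __mips_fpr != 64 (FR=0, 32-bit FP registers), doubles occupy even/odd
  // register pairs, so reloading only the even-numbered registers with ldc1
  // below restores both halves of each pair.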
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
#else
  ldc1  $f0, (4 * 36 + 8 * 0)($4)
  ldc1  $f1, (4 * 36 + 8 * 1)($4)
  ldc1  $f2, (4 * 36 + 8 * 2)($4)
  ldc1  $f3, (4 * 36 + 8 * 3)($4)
  ldc1  $f4, (4 * 36 + 8 * 4)($4)
  ldc1  $f5, (4 * 36 + 8 * 5)($4)
  ldc1  $f6, (4 * 36 + 8 * 6)($4)
  ldc1  $f7, (4 * 36 + 8 * 7)($4)
  ldc1  $f8, (4 * 36 + 8 * 8)($4)
  ldc1  $f9, (4 * 36 + 8 * 9)($4)
  ldc1  $f10, (4 * 36 + 8 * 10)($4)
  ldc1  $f11, (4 * 36 + 8 * 11)($4)
  ldc1  $f12, (4 * 36 + 8 * 12)($4)
  ldc1  $f13, (4 * 36 + 8 * 13)($4)
  ldc1  $f14, (4 * 36 + 8 * 14)($4)
  ldc1  $f15, (4 * 36 + 8 * 15)($4)
  ldc1  $f16, (4 * 36 + 8 * 16)($4)
  ldc1  $f17, (4 * 36 + 8 * 17)($4)
  ldc1  $f18, (4 * 36 + 8 * 18)($4)
  ldc1  $f19, (4 * 36 + 8 * 19)($4)
  ldc1  $f20, (4 * 36 + 8 * 20)($4)
  ldc1  $f21, (4 * 36 + 8 * 21)($4)
  ldc1  $f22, (4 * 36 + 8 * 22)($4)
  ldc1  $f23, (4 * 36 + 8 * 23)($4)
  ldc1  $f24, (4 * 36 + 8 * 24)($4)
  ldc1  $f25, (4 * 36 + 8 * 25)($4)
  ldc1  $f26, (4 * 36 + 8 * 26)($4)
  ldc1  $f27, (4 * 36 + 8 * 27)($4)
  ldc1  $f28, (4 * 36 + 8 * 28)($4)
  ldc1  $f29, (4 * 36 + 8 * 29)($4)
  ldc1  $f30, (4 * 36 + 8 * 30)($4)
  ldc1  $f31, (4 * 36 + 8 * 31)($4)
#endif
#endif
  // restore hi and lo
  lw    $8, (4 * 33)($4)
  mthi  $8
  lw    $8, (4 * 34)($4)
  mtlo  $8
  // r0 is zero
  lw    $1, (4 * 1)($4)
  lw    $2, (4 * 2)($4)
  lw    $3, (4 * 3)($4)
  // skip a0 for now
  lw    $5, (4 * 5)($4)
  lw    $6, (4 * 6)($4)
  lw    $7, (4 * 7)($4)
  lw    $8, (4 * 8)($4)
  lw    $9, (4 * 9)($4)
  lw    $10, (4 * 10)($4)
  lw    $11, (4 * 11)($4)
  lw    $12, (4 * 12)($4)
  lw    $13, (4 * 13)($4)
  lw    $14, (4 * 14)($4)
  lw    $15, (4 * 15)($4)
  lw    $16, (4 * 16)($4)
  lw    $17, (4 * 17)($4)
  lw    $18, (4 * 18)($4)
  lw    $19, (4 * 19)($4)
  lw    $20, (4 * 20)($4)
  lw    $21, (4 * 21)($4)
  lw    $22, (4 * 22)($4)
  lw    $23, (4 * 23)($4)
  lw    $24, (4 * 24)($4)
  lw    $25, (4 * 25)($4)
  lw    $26, (4 * 26)($4)
  lw    $27, (4 * 27)($4)
  lw    $28, (4 * 28)($4)
  lw    $29, (4 * 29)($4)
  lw    $30, (4 * 30)($4)
  // load new pc into ra
  lw    $31, (4 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  lw    $4, (4 * 4)($4)
  .set pop

#elif defined(__mips64)

//
// void libunwind::Registers_mips_newabi::jumpto()
//
// On entry:
//  thread state pointer is in a0 ($4)
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind21Registers_mips_newabi6jumptoEv)
  .set push
  .set noat
  .set noreorder
  .set nomacro
#ifdef __mips_hard_float
  ldc1  $f0, (8 * 35)($4)
  ldc1  $f1, (8 * 36)($4)
  ldc1  $f2, (8 * 37)($4)
  ldc1  $f3, (8 * 38)($4)
  ldc1  $f4, (8 * 39)($4)
  ldc1  $f5, (8 * 40)($4)
  ldc1  $f6, (8 * 41)($4)
  ldc1  $f7, (8 * 42)($4)
  ldc1  $f8, (8 * 43)($4)
  ldc1  $f9, (8 * 44)($4)
  ldc1  $f10, (8 * 45)($4)
  ldc1  $f11, (8 * 46)($4)
  ldc1  $f12, (8 * 47)($4)
  ldc1  $f13, (8 * 48)($4)
  ldc1  $f14, (8 * 49)($4)
  ldc1  $f15, (8 * 50)($4)
  ldc1  $f16, (8 * 51)($4)
  ldc1  $f17, (8 * 52)($4)
  ldc1  $f18, (8 * 53)($4)
  ldc1  $f19, (8 * 54)($4)
  ldc1  $f20, (8 * 55)($4)
  ldc1  $f21, (8 * 56)($4)
  ldc1  $f22, (8 * 57)($4)
  ldc1  $f23, (8 * 58)($4)
  ldc1  $f24, (8 * 59)($4)
  ldc1  $f25, (8 * 60)($4)
  ldc1  $f26, (8 * 61)($4)
  ldc1  $f27, (8 * 62)($4)
  ldc1  $f28, (8 * 63)($4)
  ldc1  $f29, (8 * 64)($4)
  ldc1  $f30, (8 * 65)($4)
  ldc1  $f31, (8 * 66)($4)
#endif
  // restore hi and lo
  ld    $8, (8 * 33)($4)
  mthi  $8
  ld    $8, (8 * 34)($4)
  mtlo  $8
  // r0 is zero
  ld    $1, (8 * 1)($4)
  ld    $2, (8 * 2)($4)
  ld    $3, (8 * 3)($4)
  // skip a0 for now
  ld    $5, (8 * 5)($4)
  ld    $6, (8 * 6)($4)
  ld    $7, (8 * 7)($4)
  ld    $8, (8 * 8)($4)
  ld    $9, (8 * 9)($4)
  ld    $10, (8 * 10)($4)
  ld    $11, (8 * 11)($4)
  ld    $12, (8 * 12)($4)
  ld    $13, (8 * 13)($4)
  ld    $14, (8 * 14)($4)
  ld    $15, (8 * 15)($4)
  ld    $16, (8 * 16)($4)
  ld    $17, (8 * 17)($4)
  ld    $18, (8 * 18)($4)
  ld    $19, (8 * 19)($4)
  ld    $20, (8 * 20)($4)
  ld    $21, (8 * 21)($4)
  ld    $22, (8 * 22)($4)
  ld    $23, (8 * 23)($4)
  ld    $24, (8 * 24)($4)
  ld    $25, (8 * 25)($4)
  ld    $26, (8 * 26)($4)
  ld    $27, (8 * 27)($4)
  ld    $28, (8 * 28)($4)
  ld    $29, (8 * 29)($4)
  ld    $30, (8 * 30)($4)
  // load new pc into ra
  ld    $31, (8 * 32)($4)
  // jump to ra, load a0 in the delay slot
  jr    $31
  ld    $4, (8 * 4)($4)
  .set pop

#elif defined(__sparc__)

//
// void libunwind::Registers_sparc::jumpto()
//
// On entry:
//  thread_state pointer is in o0
//
DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind15Registers_sparc6jumptoEv)
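  // "ta 3" below traps to the OS to spill all register windows to the stack
  // before the locals and ins are overwritten (assumption: trap 3 is the
  // flush-windows software trap on this target).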
  ta 3
  ldd [%o0 + 64],  %l0
  ldd [%o0 + 72],  %l2
  ldd [%o0 + 80],  %l4
  ldd [%o0 + 88],  %l6
  ldd [%o0 + 96],  %i0
  ldd [%o0 + 104], %i2
  ldd [%o0 + 112], %i4
  ldd [%o0 + 120], %i6
  ld  [%o0 + 60],  %o7
  jmp %o7
   nop

#endif

#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */

NO_EXEC_STACK_DIRECTIVE

