/*
 * ARM helper routines
 *
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = env_cpu(env);

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

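/*
 * Helper for Neon VTBL/VTBX: each byte of ireg is an index into the
 * byte table at vn; indexes beyond maxindex take the corresponding
 * byte of def instead (the translator passes zero for VTBL and the
 * old destination value for VTBX).
 */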
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = env_cpu(env);

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

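/*
 * Signed overflow detection for a + b: overflow is only possible when
 * the operands have the same sign, i.e. !((a ^ b) & SIGNBIT), and has
 * occurred iff the result's sign then differs from a's, i.e.
 * ((res ^ a) & SIGNBIT).
 */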
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

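/*
 * On overflow the saturated result takes the sign of a:
 * ((int32_t)a >> 31) replicates a's sign bit across the word, and
 * ~(sign ^ SIGNBIT) is then 0x7fffffff when a was non-negative and
 * 0x80000000 when a was negative.
 */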
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation to (shift + 1) bits: the value fits iff everything
 * above the low 'shift' bits is pure sign extension, i.e. iff
 * val >> shift is 0 (non-negative values) or -1 (negative values).
 */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation to 'shift' bits: clamp to [0, (1u << shift) - 1]. */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate. */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate. */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate. */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate. */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

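/* SETEND toggles CPSR.E, flipping the current data endianness. */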
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Check whether WFx (WFI/WFE) instructions are set up to be trapped.
 * Returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise returns 0, indicating that it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
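    /* Note that SCTLR.nTWE/nTWI are "no trap" bits: the instruction is
     * trapped when the relevant bit is clear.
     */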
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero, indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = is_wfe ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = env_cpu(env);
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        /* The AArch32 PC is regs[15]; env->pc is AArch64-only. */
        if (is_a64(env)) {
            env->pc -= insn_len;
        } else {
            env->regs[15] -= insn_len;
        }
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    CPUState *cs = env_cpu(env);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = env_cpu(env);

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    int debug_el = arm_debug_target_el(env);
    int cur_el = arm_current_el(env);

    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    /*
     * Other kinds of architectural debug exception are ignored if
     * they target an exception level below the current one (in QEMU
     * this is checked by arm_generate_debug_exceptions()). Breakpoint
     * instructions are special because they always generate an exception
     * to somewhere: if they can't go to the configured debug exception
     * level they are taken to the current exception level.
     */
    if (debug_el < cur_el) {
        debug_el = cur_el;
    }
    raise_exception(env, EXCP_BKPT, syndrome, debug_el);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes. */
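/*
 * SP and LR are banked per-mode, so the user-mode versions always come
 * from the USR/SYS bank; r8-r12 are banked only for FIQ, so the
 * user-mode copies live in usr_regs[] only while in FIQ mode.
 */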
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

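/*
 * Registers marked ARM_CP_IO may touch device or timer state shared
 * with the rest of the machine, so their read/write functions must run
 * with the iothread (big QEMU) lock held.
 */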
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

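/*
 * Called from translated code before HVC is taken: raise UNDEF instead
 * if EL2 doesn't exist, or if HVC is disabled via SCR_EL3.HCE or
 * HCR_EL2.HCD, unless the instruction is really a PSCI call into
 * QEMU's internal "firmware".
 */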
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state. */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = env_archcpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     * -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     * -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     * -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3, or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions. */

/* Similarly for variable shift instructions. */

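/*
 * Only the bottom byte of the shift amount is significant. A zero
 * shift leaves CF unchanged; a shift of exactly 32 moves the last bit
 * shifted out into CF; larger amounts clear CF, except for ASR, which
 * behaves like a shift by 32 for any amount >= 32, and ROR, which uses
 * the amount modulo 32 (a nonzero multiple of 32 setting CF from bit 31).
 */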
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}

void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */

    ARMCPU *cpu = env_archcpu(env);
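    /*
     * dcz_blocksize is log2 of the block size in 32-bit words, so the
     * byte length is 4 << dcz_blocksize; the whole block containing
     * vaddr_in is zeroed, aligned down to a block boundary.
     */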
    uint64_t blocklen = 4 << cpu->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);

#ifndef CONFIG_USER_ONLY
    {
        /*
         * Slightly awkwardly, QEMU's TARGET_PAGE_SIZE may be less than
         * the block size so we might have to do more than one TLB lookup.
         * We know that in fact for any v8 CPU the page size is at least 4K
         * and the block size must be 2K or less, but TARGET_PAGE_SIZE is only
         * 1K as an artefact of legacy v5 subpage support being present in the
         * same QEMU executable. So in practice the hostaddr[] array has
         * two entries, given the current setting of TARGET_PAGE_BITS_MIN.
         */
        int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
        void *hostaddr[DIV_ROUND_UP(2 * KiB, 1 << TARGET_PAGE_BITS_MIN)];
        int try, i;
        unsigned mmu_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

        assert(maxidx <= ARRAY_SIZE(hostaddr));

        for (try = 0; try < 2; try++) {

            for (i = 0; i < maxidx; i++) {
                hostaddr[i] = tlb_vaddr_to_host(env,
                                                vaddr + TARGET_PAGE_SIZE * i,
                                                1, mmu_idx);
                if (!hostaddr[i]) {
                    break;
                }
            }
            if (i == maxidx) {
                /*
                 * If it's all in the TLB it's fair game for just writing to;
                 * we know we don't need to update dirty status, etc.
                 */
                for (i = 0; i < maxidx - 1; i++) {
                    memset(hostaddr[i], 0, TARGET_PAGE_SIZE);
                }
                memset(hostaddr[i], 0, blocklen - (i * TARGET_PAGE_SIZE));
                return;
            }
            /*
             * OK, try a store and see if we can populate the tlb. This
             * might cause an exception if the memory isn't writable,
             * in which case we will longjmp out of here. We must for
             * this purpose use the actual register value passed to us
             * so that we get the fault address right.
             */
            helper_ret_stb_mmu(env, vaddr_in, 0, oi, GETPC());
            /* Now we can populate the other TLB entries, if any */
            for (i = 0; i < maxidx; i++) {
                uint64_t va = vaddr + TARGET_PAGE_SIZE * i;
                if (va != (vaddr_in & TARGET_PAGE_MASK)) {
                    helper_ret_stb_mmu(env, va, 0, oi, GETPC());
                }
            }
        }

        /*
         * Slow path (probably attempt to do this to an I/O device or
         * similar, or clearing of a block of code we have translations
         * cached for). Just do a series of byte writes as the architecture
         * demands. It's not worth trying to use a cpu_physical_memory_map(),
         * memset(), unmap() sequence here because:
         *  + we'd need to account for the blocksize being larger than a page
         *  + the direct-RAM access case is almost always going to be dealt
         *    with in the fastpath code above, so there's no speed benefit
         *  + we would have to deal with the map returning NULL because the
         *    bounce buffer was in use
         */
        for (i = 0; i < blocklen; i++) {
            helper_ret_stb_mmu(env, vaddr + i, 0, oi, GETPC());
        }
    }
#else
    memset(g2h(vaddr), 0, blocklen);
#endif
}