/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */

/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"

/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
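 * As a worked example, imm8 == 0x70 (sign clear, imm8<6> set, low bits
 * 0x30) expands to 1.0 in every format: 0x3c00 for MO_16, 0x3f800000
 * for MO_32 and 0x3ff0000000000000 for MO_64.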
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}

/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}

/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}

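/*
 * VSEL: copy Vn to Vd when the encoded condition (eq/vs/ge/gt) holds,
 * otherwise copy Vm.
 */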
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
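        /*
         * gt needs two movconds: select frn only when Z is clear,
         * then fall back to frm again unless N == V.
         */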
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}

static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
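
/*
 * The index is the instruction's 2-bit rm field; for instance VRINTA
 * (rm == 0b00) rounds to nearest with ties away from zero.
 */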

static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

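    /*
     * gen_helper_set_rmode() returns the previous rounding mode in its
     * first argument, so the second call below restores the original mode.
     */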
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

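    /*
     * The scalar's byte offset within the D register determines both the
     * 32-bit "pass" (which half of the D register to access) and the bit
     * offset within that 32-bit word.
     */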
    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

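    /* 8-bit and 16-bit scalar accesses exist only with the Neon extension */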
    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

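    /*
     * The b and e bits encode the element size:
     * 8 bits (b set), 16 bits (e set), otherwise 32 bits.
     */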
    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Accesses to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
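            /* Rt == 15 is VMRS APSR_nzcv, FPSCR: read only the NZCV flags */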
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
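            /* FPSCR.LEN and .STRIDE are cached in the TB flags, so end the TB */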
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register
     */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
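        /*
         * An odd imm marks the FLDMX/FSTMX form, whose writeback address
         * is Rn + imm * 4, i.e. 4 beyond the last D register transferred.
         */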
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);

/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
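
/*
 * For example, vfp_advance_sreg(7, 1) wraps around to s0 within the
 * first bank, and vfp_advance_dreg(19, 1) wraps to d16 within its bank.
 */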

/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
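 * On entry s->vec_len holds the FPSCR.LEN field, i.e. one less than the
 * number of operations performed when short vectors are in use.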
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    neon_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                neon_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        neon_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}

static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    neon_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                neon_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        neon_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}

static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}

static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct: an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
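    /*
     * In the code below, a->o2 set selects the VFMS/VFNMS forms (fn is
     * negated) and bit 0 of a->o1 selects the VFNMA/VFNMS forms (fd is
     * negated).
     */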
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    neon_load_reg32(vn, a->vn);
    neon_load_reg32(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    neon_load_reg32(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    neon_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct: an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    neon_load_reg64(vn, a->vn);
    neon_load_reg64(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    neon_load_reg64(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    neon_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}

static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}

static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
{
    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
}

static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
{
    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
}

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}

static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
}

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
{
    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
{
    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
}

static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    neon_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        neon_load_reg32(vm, a->vm);
    }

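    /* The E variant also raises Invalid Operation for quiet NaN inputs */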
2033 if (a->e) {
2034 gen_helper_vfp_cmpes(vd, vm, cpu_env);
2035 } else {
2036 gen_helper_vfp_cmps(vd, vm, cpu_env);
2037 }
2038
2039 tcg_temp_free_i32(vd);
2040 tcg_temp_free_i32(vm);
2041
2042 return true;
2043}
2044
2045static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
2046{
2047 TCGv_i64 vd, vm;
2048
2049 /* Vm/M bits must be zero for the Z variant */
2050 if (a->z && a->vm != 0) {
2051 return false;
2052 }
2053
2054 /* UNDEF accesses to D16-D31 if they don't exist. */
2055 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2056 return false;
2057 }
2058
2059 if (!dc_isar_feature(aa32_fpdp, s)) {
2060 return false;
2061 }
2062
2063 if (!vfp_access_check(s)) {
2064 return true;
2065 }
2066
2067 vd = tcg_temp_new_i64();
2068 vm = tcg_temp_new_i64();
2069
2070 neon_load_reg64(vd, a->vd);
2071 if (a->z) {
2072 tcg_gen_movi_i64(vm, 0);
2073 } else {
2074 neon_load_reg64(vm, a->vm);
2075 }
2076
2077 if (a->e) {
2078 gen_helper_vfp_cmped(vd, vm, cpu_env);
2079 } else {
2080 gen_helper_vfp_cmpd(vd, vm, cpu_env);
2081 }
2082
2083 tcg_temp_free_i64(vd);
2084 tcg_temp_free_i64(vm);
2085
2086 return true;
2087}
2088
2089static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
2090{
2091 TCGv_ptr fpst;
2092 TCGv_i32 ahp_mode;
2093 TCGv_i32 tmp;
2094
2095 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2096 return false;
2097 }
2098
2099 if (!vfp_access_check(s)) {
2100 return true;
2101 }
2102
2103 fpst = get_fpstatus_ptr(false);
2104 ahp_mode = get_ahp_flag();
2105 tmp = tcg_temp_new_i32();
2106 /* The T bit tells us if we want the low or high 16 bits of Vm */
2107 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2108 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
2109 neon_store_reg32(tmp, a->vd);
2110 tcg_temp_free_i32(ahp_mode);
2111 tcg_temp_free_ptr(fpst);
2112 tcg_temp_free_i32(tmp);
2113 return true;
2114}
2115
2116static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
2117{
2118 TCGv_ptr fpst;
2119 TCGv_i32 ahp_mode;
2120 TCGv_i32 tmp;
2121 TCGv_i64 vd;
2122
2123 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2124 return false;
2125 }
2126
2127 /* UNDEF accesses to D16-D31 if they don't exist. */
2128 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2129 return false;
2130 }
2131
2132 if (!dc_isar_feature(aa32_fpdp, s)) {
2133 return false;
2134 }
2135
2136 if (!vfp_access_check(s)) {
2137 return true;
2138 }
2139
2140 fpst = get_fpstatus_ptr(false);
2141 ahp_mode = get_ahp_flag();
2142 tmp = tcg_temp_new_i32();
2143 /* The T bit tells us if we want the low or high 16 bits of Vm */
2144 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2145 vd = tcg_temp_new_i64();
2146 gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
2147 neon_store_reg64(vd, a->vd);
2148 tcg_temp_free_i32(ahp_mode);
2149 tcg_temp_free_ptr(fpst);
2150 tcg_temp_free_i32(tmp);
2151 tcg_temp_free_i64(vd);
2152 return true;
2153}
2154
2155static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
2156{
2157 TCGv_ptr fpst;
2158 TCGv_i32 ahp_mode;
2159 TCGv_i32 tmp;
2160
2161 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2162 return false;
2163 }
2164
2165 if (!vfp_access_check(s)) {
2166 return true;
2167 }
2168
2169 fpst = get_fpstatus_ptr(false);
2170 ahp_mode = get_ahp_flag();
2171 tmp = tcg_temp_new_i32();
2172
2173 neon_load_reg32(tmp, a->vm);
2174 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
2175 tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2176 tcg_temp_free_i32(ahp_mode);
2177 tcg_temp_free_ptr(fpst);
2178 tcg_temp_free_i32(tmp);
2179 return true;
2180}
2181
2182static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
2183{
2184 TCGv_ptr fpst;
2185 TCGv_i32 ahp_mode;
2186 TCGv_i32 tmp;
2187 TCGv_i64 vm;
2188
2189 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2190 return false;
2191 }
2192
2193 /* UNDEF accesses to D16-D31 if they don't exist. */
2194 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2195 return false;
2196 }
2197
2198 if (!dc_isar_feature(aa32_fpdp, s)) {
2199 return false;
2200 }
2201
2202 if (!vfp_access_check(s)) {
2203 return true;
2204 }
2205
2206 fpst = get_fpstatus_ptr(false);
2207 ahp_mode = get_ahp_flag();
2208 tmp = tcg_temp_new_i32();
2209 vm = tcg_temp_new_i64();
2210
2211 neon_load_reg64(vm, a->vm);
2212 gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
2213 tcg_temp_free_i64(vm);
2214 tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2215 tcg_temp_free_i32(ahp_mode);
2216 tcg_temp_free_ptr(fpst);
2217 tcg_temp_free_i32(tmp);
2218 return true;
2219}
2220
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

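/*
 * VRINTZ always rounds toward zero, regardless of FPSCR.RMode.
 * gen_helper_set_rmode() installs the requested rounding mode into the
 * float_status and returns the previous mode, so calling it twice with
 * the same TCG temp wraps the operation in round-to-zero and then
 * restores the original mode.
 */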
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

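/*
 * VRINTX ("round to integral, raising Inexact") differs from VRINTR only
 * in that it signals the Inexact exception when the result is not
 * numerically equal to the input, hence the _exact helper variants.
 */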
static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints_exact(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

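/*
 * VCVT between single and double precision. Only the double-precision
 * operand can name D16-D31, so only that operand (Vd when widening,
 * Vm when narrowing) needs the D16-D31 existence check.
 */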
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}

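/*
 * VCVT integer to floating point: a->s distinguishes a signed from an
 * unsigned 32-bit source; the destination is a single-precision or
 * double-precision register respectively for the _sp and _dp forms.
 */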
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

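/*
 * VJCVT is the v8.3-A "JavaScript conversion": double to signed 32-bit
 * integer with round-to-zero semantics. Per the architecture it also
 * updates the NZCV flags (Z set when the result is exact and in range),
 * which is why the helper takes cpu_env rather than just a float_status.
 */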
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

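/*
 * VCVT between floating point and fixed point. The opc field packs the
 * op:U:sx bits: sx (bit 0) selects a 16-bit or 32-bit fixed-point value,
 * U (bit 1) selects unsigned, and op (bit 2) selects the from-float
 * direction. The number of fraction bits is the operand size minus the
 * immediate (e.g. sx=1, imm=28 gives 4 fraction bits), and Vd is both
 * source and destination.
 */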
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    neon_load_reg32(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    neon_load_reg64(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

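/*
 * VCVT floating point to integer: a->rz distinguishes the plain VCVT
 * form, which always rounds toward zero (the tosiz/touiz helpers), from
 * VCVTR, which rounds using the current FPSCR.RMode.
 */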
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}
