1 | /* |
2 | * S/390 translation |
3 | * |
4 | * Copyright (c) 2009 Ulrich Hecht |
5 | * Copyright (c) 2010 Alexander Graf |
6 | * |
7 | * This library is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU Lesser General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2.1 of the License, or (at your option) any later version. |
11 | * |
12 | * This library is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * Lesser General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU Lesser General Public |
18 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | */ |
20 | |
21 | /* #define DEBUG_INLINE_BRANCHES */ |
22 | #define S390X_DEBUG_DISAS |
23 | /* #define S390X_DEBUG_DISAS_VERBOSE */ |
24 | |
25 | #ifdef S390X_DEBUG_DISAS_VERBOSE |
26 | # define LOG_DISAS(...) qemu_log(__VA_ARGS__) |
27 | #else |
28 | # define LOG_DISAS(...) do { } while (0) |
29 | #endif |
30 | |
31 | #include "qemu/osdep.h" |
32 | #include "cpu.h" |
33 | #include "internal.h" |
34 | #include "disas/disas.h" |
35 | #include "exec/exec-all.h" |
36 | #include "tcg-op.h" |
37 | #include "tcg-op-gvec.h" |
38 | #include "qemu/log.h" |
39 | #include "qemu/host-utils.h" |
40 | #include "exec/cpu_ldst.h" |
41 | #include "exec/gen-icount.h" |
42 | #include "exec/helper-proto.h" |
43 | #include "exec/helper-gen.h" |
44 | |
45 | #include "trace-tcg.h" |
46 | #include "exec/translator.h" |
47 | #include "exec/log.h" |
48 | #include "qemu/atomic128.h" |
49 | |
50 | |
/* Information that (most) every instruction needs to manipulate. */
typedef struct DisasContext DisasContext;
typedef struct DisasInsn DisasInsn;
typedef struct DisasFields DisasFields;

struct DisasContext {
    DisasContextBase base;
    const DisasInsn *insn;     /* decode-table entry for the current insn */
    DisasFields *fields;       /* decoded operand fields of the current insn */
    uint64_t ex_value;         /* NOTE(review): appears tied to EXECUTE
                                  emulation -- confirm against decoder */
    /*
     * During translate_one(), pc_tmp is used to determine the instruction
     * to be executed after base.pc_next - e.g. next sequential instruction
     * or a branch target.
     */
    uint64_t pc_tmp;
    uint32_t ilen;             /* length of the current insn, in bytes */
    enum cc_op cc_op;          /* how the condition code is currently encoded */
    bool do_debug;
};
71 | |
/* Information carried about a condition to be evaluated. */
typedef struct {
    TCGCond cond:8;
    bool is_64;     /* true -> operands are in u.s64, else in u.s32 */
    bool g1;        /* operand a is a TCG global; free_compare must not free it */
    bool g2;        /* operand b is a TCG global; free_compare must not free it */
    union {
        struct { TCGv_i64 a, b; } s64;
        struct { TCGv_i32 a, b; } s32;
    } u;
} DisasCompare;
83 | |
84 | #ifdef DEBUG_INLINE_BRANCHES |
85 | static uint64_t inline_branch_hit[CC_OP_MAX]; |
86 | static uint64_t inline_branch_miss[CC_OP_MAX]; |
87 | #endif |
88 | |
89 | static void pc_to_link_info(TCGv_i64 out, DisasContext *s, uint64_t pc) |
90 | { |
91 | TCGv_i64 tmp; |
92 | |
93 | if (s->base.tb->flags & FLAG_MASK_32) { |
94 | if (s->base.tb->flags & FLAG_MASK_64) { |
95 | tcg_gen_movi_i64(out, pc); |
96 | return; |
97 | } |
98 | pc |= 0x80000000; |
99 | } |
100 | assert(!(s->base.tb->flags & FLAG_MASK_64)); |
101 | tmp = tcg_const_i64(pc); |
102 | tcg_gen_deposit_i64(out, out, tmp, 0, 32); |
103 | tcg_temp_free_i64(tmp); |
104 | } |
105 | |
/* TCG globals mapped onto fixed CPUS390XState fields;
   created once in s390x_translate_init(). */
static TCGv_i64 psw_addr;   /* psw.addr */
static TCGv_i64 psw_mask;   /* psw.mask */
static TCGv_i64 gbea;       /* breaking-event address, used by PER code below */

static TCGv_i32 cc_op;      /* current cc encoding method / static cc value */
static TCGv_i64 cc_src;     /* deferred cc computation operands */
static TCGv_i64 cc_dst;
static TCGv_i64 cc_vr;

static char cpu_reg_names[16][4];   /* backing storage for "r0".."r15" names */
static TCGv_i64 regs[16];           /* general registers */
117 | |
118 | void s390x_translate_init(void) |
119 | { |
120 | int i; |
121 | |
122 | psw_addr = tcg_global_mem_new_i64(cpu_env, |
123 | offsetof(CPUS390XState, psw.addr), |
124 | "psw_addr" ); |
125 | psw_mask = tcg_global_mem_new_i64(cpu_env, |
126 | offsetof(CPUS390XState, psw.mask), |
127 | "psw_mask" ); |
128 | gbea = tcg_global_mem_new_i64(cpu_env, |
129 | offsetof(CPUS390XState, gbea), |
130 | "gbea" ); |
131 | |
132 | cc_op = tcg_global_mem_new_i32(cpu_env, offsetof(CPUS390XState, cc_op), |
133 | "cc_op" ); |
134 | cc_src = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_src), |
135 | "cc_src" ); |
136 | cc_dst = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_dst), |
137 | "cc_dst" ); |
138 | cc_vr = tcg_global_mem_new_i64(cpu_env, offsetof(CPUS390XState, cc_vr), |
139 | "cc_vr" ); |
140 | |
141 | for (i = 0; i < 16; i++) { |
142 | snprintf(cpu_reg_names[i], sizeof(cpu_reg_names[0]), "r%d" , i); |
143 | regs[i] = tcg_global_mem_new(cpu_env, |
144 | offsetof(CPUS390XState, regs[i]), |
145 | cpu_reg_names[i]); |
146 | } |
147 | } |
148 | |
/* env byte offset of the full 16-byte vector register REG. */
static inline int vec_full_reg_offset(uint8_t reg)
{
    g_assert(reg < 32);
    return offsetof(CPUS390XState, vregs[reg][0]);
}

/* env byte offset of element ENR (element size ES) of vector register REG. */
static inline int vec_reg_offset(uint8_t reg, uint8_t enr, MemOp es)
{
    /* Convert element size (es) - e.g. MO_8 - to bytes */
    const uint8_t bytes = 1 << es;
    int offs = enr * bytes;

    /*
     * vregs[n][0] is the lowest 8 byte and vregs[n][1] the highest 8 byte
     * of the 16 byte vector, on both, little and big endian systems.
     *
     * Big Endian (target/possible host)
     * B:  [ 0][ 1][ 2][ 3][ 4][ 5][ 6][ 7] - [ 8][ 9][10][11][12][13][14][15]
     * HW: [     0][     1][     2][     3] - [     4][     5][     6][     7]
     * W:  [             0][             1] - [             2][             3]
     * DW: [                             0] - [                             1]
     *
     * Little Endian (possible host)
     * B:  [ 7][ 6][ 5][ 4][ 3][ 2][ 1][ 0] - [15][14][13][12][11][10][ 9][ 8]
     * HW: [     3][     2][     1][     0] - [     7][     6][     5][     4]
     * W:  [             1][             0] - [             3][             2]
     * DW: [                             0] - [                             1]
     *
     * For 16 byte elements, the two 8 byte halves will not form a host
     * int128 if the host is little endian, since they're in the wrong order.
     * Some operations (e.g. xor) do not care. For operations like addition,
     * the two 8 byte elements have to be loaded separately. Let's force all
     * 16 byte operations to handle it in a special way.
     */
    g_assert(es <= MO_64);
#ifndef HOST_WORDS_BIGENDIAN
    /* Flip the offset within each 8-byte half so that the big-endian
       element numbering above lands on the correct host bytes. */
    offs ^= (8 - bytes);
#endif
    return offs + vec_full_reg_offset(reg);
}
189 | |
/* env offset of the 64-bit (long) part of floating point register REG;
   FP registers overlay the low half of vector registers 0-15. */
static inline int freg64_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_64);
}

/* env offset of the 32-bit (short) part of floating point register REG. */
static inline int freg32_offset(uint8_t reg)
{
    g_assert(reg < 16);
    return vec_reg_offset(reg, 0, MO_32);
}

/* Return a fresh temporary holding a copy of general register REG. */
static TCGv_i64 load_reg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();
    tcg_gen_mov_i64(r, regs[reg]);
    return r;
}

/* Return a fresh temporary holding the 64-bit value of FP register REG. */
static TCGv_i64 load_freg(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld_i64(r, cpu_env, freg64_offset(reg));
    return r;
}

/* Return a fresh temporary with the short (32-bit) part of FP register REG,
   zero-extended to 64 bits. */
static TCGv_i64 load_freg32_i64(int reg)
{
    TCGv_i64 r = tcg_temp_new_i64();

    tcg_gen_ld32u_i64(r, cpu_env, freg32_offset(reg));
    return r;
}
224 | |
/* Write V to general register REG. */
static void store_reg(int reg, TCGv_i64 v)
{
    tcg_gen_mov_i64(regs[reg], v);
}

/* Write V to the 64-bit part of FP register REG. */
static void store_freg(int reg, TCGv_i64 v)
{
    tcg_gen_st_i64(v, cpu_env, freg64_offset(reg));
}

/* Write the low 32 bits of V to general register REG. */
static void store_reg32_i64(int reg, TCGv_i64 v)
{
    /* 32 bit register writes keep the upper half */
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 0, 32);
}

/* Write the low 32 bits of V to the HIGH half of general register REG. */
static void store_reg32h_i64(int reg, TCGv_i64 v)
{
    tcg_gen_deposit_i64(regs[reg], regs[reg], v, 32, 32);
}

/* Write the low 32 bits of V to the short part of FP register REG. */
static void store_freg32_i64(int reg, TCGv_i64 v)
{
    tcg_gen_st32_i64(v, cpu_env, freg32_offset(reg));
}

/* Fetch the low 64 bits of a 128-bit helper result from env->retxl. */
static void return_low128(TCGv_i64 dest)
{
    tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUS390XState, retxl));
}

/* Synchronize psw.addr with the address of the current instruction. */
static void update_psw_addr(DisasContext *s)
{
    /* psw.addr */
    tcg_gen_movi_i64(psw_addr, s->base.pc_next);
}
261 | |
262 | static void per_branch(DisasContext *s, bool to_next) |
263 | { |
264 | #ifndef CONFIG_USER_ONLY |
265 | tcg_gen_movi_i64(gbea, s->base.pc_next); |
266 | |
267 | if (s->base.tb->flags & FLAG_MASK_PER) { |
268 | TCGv_i64 next_pc = to_next ? tcg_const_i64(s->pc_tmp) : psw_addr; |
269 | gen_helper_per_branch(cpu_env, gbea, next_pc); |
270 | if (to_next) { |
271 | tcg_temp_free_i64(next_pc); |
272 | } |
273 | } |
274 | #endif |
275 | } |
276 | |
277 | static void per_branch_cond(DisasContext *s, TCGCond cond, |
278 | TCGv_i64 arg1, TCGv_i64 arg2) |
279 | { |
280 | #ifndef CONFIG_USER_ONLY |
281 | if (s->base.tb->flags & FLAG_MASK_PER) { |
282 | TCGLabel *lab = gen_new_label(); |
283 | tcg_gen_brcond_i64(tcg_invert_cond(cond), arg1, arg2, lab); |
284 | |
285 | tcg_gen_movi_i64(gbea, s->base.pc_next); |
286 | gen_helper_per_branch(cpu_env, gbea, psw_addr); |
287 | |
288 | gen_set_label(lab); |
289 | } else { |
290 | TCGv_i64 pc = tcg_const_i64(s->base.pc_next); |
291 | tcg_gen_movcond_i64(cond, gbea, arg1, arg2, gbea, pc); |
292 | tcg_temp_free_i64(pc); |
293 | } |
294 | #endif |
295 | } |
296 | |
/* Record the current insn as the PER breaking-event address. */
static void per_breaking_event(DisasContext *s)
{
    tcg_gen_movi_i64(gbea, s->base.pc_next);
}

/* Flush the translation-time cc method into env->cc_op.  DYNAMIC and
   STATIC mean the env value is already authoritative, so skip those. */
static void update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC && s->cc_op != CC_OP_STATIC) {
        tcg_gen_movi_i32(cc_op, s->cc_op);
    }
}

/* Fetch a 2-byte instruction halfword at PC. */
static inline uint64_t ld_code2(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)cpu_lduw_code(env, pc);
}

/* Fetch a 4-byte instruction word at PC, zero-extended. */
static inline uint64_t ld_code4(CPUS390XState *env, uint64_t pc)
{
    return (uint64_t)(uint32_t)cpu_ldl_code(env, pc);
}
318 | |
319 | static int get_mem_index(DisasContext *s) |
320 | { |
321 | if (!(s->base.tb->flags & FLAG_MASK_DAT)) { |
322 | return MMU_REAL_IDX; |
323 | } |
324 | |
325 | switch (s->base.tb->flags & FLAG_MASK_ASC) { |
326 | case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: |
327 | return MMU_PRIMARY_IDX; |
328 | case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: |
329 | return MMU_SECONDARY_IDX; |
330 | case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: |
331 | return MMU_HOME_IDX; |
332 | default: |
333 | tcg_abort(); |
334 | break; |
335 | } |
336 | } |
337 | |
338 | static void gen_exception(int excp) |
339 | { |
340 | TCGv_i32 tmp = tcg_const_i32(excp); |
341 | gen_helper_exception(cpu_env, tmp); |
342 | tcg_temp_free_i32(tmp); |
343 | } |
344 | |
345 | static void gen_program_exception(DisasContext *s, int code) |
346 | { |
347 | TCGv_i32 tmp; |
348 | |
349 | /* Remember what pgm exeption this was. */ |
350 | tmp = tcg_const_i32(code); |
351 | tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code)); |
352 | tcg_temp_free_i32(tmp); |
353 | |
354 | tmp = tcg_const_i32(s->ilen); |
355 | tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_ilen)); |
356 | tcg_temp_free_i32(tmp); |
357 | |
358 | /* update the psw */ |
359 | update_psw_addr(s); |
360 | |
361 | /* Save off cc. */ |
362 | update_cc_op(s); |
363 | |
364 | /* Trigger exception. */ |
365 | gen_exception(EXCP_PGM); |
366 | } |
367 | |
/* Raise a PGM_OPERATION (illegal opcode) program exception. */
static inline void gen_illegal_opcode(DisasContext *s)
{
    gen_program_exception(s, PGM_OPERATION);
}

/* Raise a data exception with data-exception code DXC. */
static inline void gen_data_exception(uint8_t dxc)
{
    TCGv_i32 tmp = tcg_const_i32(dxc);
    gen_helper_data_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

/* Raise a trap as a data exception with the all-ones DXC. */
static inline void gen_trap(DisasContext *s)
{
    /* Set DXC to 0xff */
    gen_data_exception(0xff);
}
385 | |
386 | static void gen_addi_and_wrap_i64(DisasContext *s, TCGv_i64 dst, TCGv_i64 src, |
387 | int64_t imm) |
388 | { |
389 | tcg_gen_addi_i64(dst, src, imm); |
390 | if (!(s->base.tb->flags & FLAG_MASK_64)) { |
391 | if (s->base.tb->flags & FLAG_MASK_32) { |
392 | tcg_gen_andi_i64(dst, dst, 0x7fffffff); |
393 | } else { |
394 | tcg_gen_andi_i64(dst, dst, 0x00ffffff); |
395 | } |
396 | } |
397 | } |
398 | |
399 | static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) |
400 | { |
401 | TCGv_i64 tmp = tcg_temp_new_i64(); |
402 | |
403 | /* |
404 | * Note that d2 is limited to 20 bits, signed. If we crop negative |
405 | * displacements early we create larger immedate addends. |
406 | */ |
407 | if (b2 && x2) { |
408 | tcg_gen_add_i64(tmp, regs[b2], regs[x2]); |
409 | gen_addi_and_wrap_i64(s, tmp, tmp, d2); |
410 | } else if (b2) { |
411 | gen_addi_and_wrap_i64(s, tmp, regs[b2], d2); |
412 | } else if (x2) { |
413 | gen_addi_and_wrap_i64(s, tmp, regs[x2], d2); |
414 | } else if (!(s->base.tb->flags & FLAG_MASK_64)) { |
415 | if (s->base.tb->flags & FLAG_MASK_32) { |
416 | tcg_gen_movi_i64(tmp, d2 & 0x7fffffff); |
417 | } else { |
418 | tcg_gen_movi_i64(tmp, d2 & 0x00ffffff); |
419 | } |
420 | } else { |
421 | tcg_gen_movi_i64(tmp, d2); |
422 | } |
423 | |
424 | return tmp; |
425 | } |
426 | |
427 | static inline bool live_cc_data(DisasContext *s) |
428 | { |
429 | return (s->cc_op != CC_OP_DYNAMIC |
430 | && s->cc_op != CC_OP_STATIC |
431 | && s->cc_op > 3); |
432 | } |
433 | |
/* Set the cc to the constant VAL (0-3), discarding any live cc data. */
static inline void gen_op_movi_cc(DisasContext *s, uint32_t val)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_CONST0 + val;
}

/* Defer cc computation OP over the single operand DST. */
static void gen_op_update1_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation OP over the operands SRC and DST. */
static void gen_op_update2_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_vr);
    }
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    s->cc_op = op;
}

/* Defer cc computation OP over the operands SRC, DST and VR. */
static void gen_op_update3_cc_i64(DisasContext *s, enum cc_op op, TCGv_i64 src,
                                  TCGv_i64 dst, TCGv_i64 vr)
{
    tcg_gen_mov_i64(cc_src, src);
    tcg_gen_mov_i64(cc_dst, dst);
    tcg_gen_mov_i64(cc_vr, vr);
    s->cc_op = op;
}

/* Lazily set cc from VAL per the "nonzero" classification. */
static void set_cc_nz_u64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, val);
}

/* Lazily set cc from a 32-bit float result VAL. */
static void gen_set_cc_nz_f32(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, val);
}

/* Lazily set cc from a 64-bit float result VAL. */
static void gen_set_cc_nz_f64(DisasContext *s, TCGv_i64 val)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, val);
}

/* Lazily set cc from a 128-bit float result in VH:VL. */
static void gen_set_cc_nz_f128(DisasContext *s, TCGv_i64 vh, TCGv_i64 vl)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, vh, vl);
}

/* CC value is in env->cc_op */
static void set_cc_static(DisasContext *s)
{
    if (live_cc_data(s)) {
        tcg_gen_discard_i64(cc_src);
        tcg_gen_discard_i64(cc_dst);
        tcg_gen_discard_i64(cc_vr);
    }
    s->cc_op = CC_OP_STATIC;
}
504 | |
/*
 * Materialize the current condition code into the cc_op global and mark
 * the state CC_OP_STATIC.  Depending on s->cc_op, the value is either a
 * compile-time constant, already present in env, or computed by the
 * calc_cc helper from up to three of cc_src/cc_dst/cc_vr.
 */
static void gen_op_calc_cc(DisasContext *s)
{
    TCGv_i32 local_cc_op = NULL;
    TCGv_i64 dummy = NULL;

    /* First pass: create the temporaries the helper call will need.
       The 3-operand ops need no dummy; constants and static need nothing. */
    switch (s->cc_op) {
    default:
        dummy = tcg_const_i64(0);
        /* FALLTHRU */
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        local_cc_op = tcg_const_i32(s->cc_op);
        break;
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
    case CC_OP_STATIC:
    case CC_OP_DYNAMIC:
        break;
    }

    /* Second pass: emit the cc computation itself. */
    switch (s->cc_op) {
    case CC_OP_CONST0:
    case CC_OP_CONST1:
    case CC_OP_CONST2:
    case CC_OP_CONST3:
        /* s->cc_op is the cc value */
        tcg_gen_movi_i32(cc_op, s->cc_op - CC_OP_CONST0);
        break;
    case CC_OP_STATIC:
        /* env->cc_op already is the cc value */
        break;
    case CC_OP_NZ:
    case CC_OP_ABS_64:
    case CC_OP_NABS_64:
    case CC_OP_ABS_32:
    case CC_OP_NABS_32:
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_COMP_32:
    case CC_OP_COMP_64:
    case CC_OP_NZ_F32:
    case CC_OP_NZ_F64:
    case CC_OP_FLOGR:
    case CC_OP_LCBB:
        /* 1 argument */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, dummy, cc_dst, dummy);
        break;
    case CC_OP_ICM:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_SLA_32:
    case CC_OP_SLA_64:
    case CC_OP_NZ_F128:
    case CC_OP_VC:
        /* 2 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, dummy);
        break;
    case CC_OP_ADD_64:
    case CC_OP_ADDU_64:
    case CC_OP_ADDC_64:
    case CC_OP_SUB_64:
    case CC_OP_SUBU_64:
    case CC_OP_SUBB_64:
    case CC_OP_ADD_32:
    case CC_OP_ADDU_32:
    case CC_OP_ADDC_32:
    case CC_OP_SUB_32:
    case CC_OP_SUBU_32:
    case CC_OP_SUBB_32:
        /* 3 arguments */
        gen_helper_calc_cc(cc_op, cpu_env, local_cc_op, cc_src, cc_dst, cc_vr);
        break;
    case CC_OP_DYNAMIC:
        /* unknown operation - assume 3 arguments and cc_op in env */
        gen_helper_calc_cc(cc_op, cpu_env, cc_op, cc_src, cc_dst, cc_vr);
        break;
    default:
        tcg_abort();
    }

    if (local_cc_op) {
        tcg_temp_free_i32(local_cc_op);
    }
    if (dummy) {
        tcg_temp_free_i64(dummy);
    }

    /* We now have cc in cc_op as constant */
    set_cc_static(s);
}
612 | |
613 | static bool use_exit_tb(DisasContext *s) |
614 | { |
615 | return s->base.singlestep_enabled || |
616 | (tb_cflags(s->base.tb) & CF_LAST_IO) || |
617 | (s->base.tb->flags & FLAG_MASK_PER); |
618 | } |
619 | |
620 | static bool use_goto_tb(DisasContext *s, uint64_t dest) |
621 | { |
622 | if (unlikely(use_exit_tb(s))) { |
623 | return false; |
624 | } |
625 | #ifndef CONFIG_USER_ONLY |
626 | return (dest & TARGET_PAGE_MASK) == (s->base.tb->pc & TARGET_PAGE_MASK) || |
627 | (dest & TARGET_PAGE_MASK) == (s->base.pc_next & TARGET_PAGE_MASK); |
628 | #else |
629 | return true; |
630 | #endif |
631 | } |
632 | |
/* Statistics hook (active only with DEBUG_INLINE_BRANCHES): count a
   branch whose condition could not be evaluated inline. */
static void account_noninline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_miss[cc_op]++;
#endif
}

/* Statistics hook (active only with DEBUG_INLINE_BRANCHES): count an
   inlined branch. */
static void account_inline_branch(DisasContext *s, int cc_op)
{
#ifdef DEBUG_INLINE_BRANCHES
    inline_branch_hit[cc_op]++;
#endif
}

/* Table of mask values to comparison codes, given a comparison as input.
   For such, CC=3 should not be possible.  Mask bit 8 selects cc=0 (EQ),
   4 selects cc=1 (LT), 2 selects cc=2 (GT); bit 1 (cc=3) is ignored. */
static const TCGCond ltgt_cond[16] = {
    TCG_COND_NEVER,  TCG_COND_NEVER,     /*    |    |    | x */
    TCG_COND_GT,     TCG_COND_GT,        /*    |    | GT | x */
    TCG_COND_LT,     TCG_COND_LT,        /*    | LT |    | x */
    TCG_COND_NE,     TCG_COND_NE,        /*    | LT | GT | x */
    TCG_COND_EQ,     TCG_COND_EQ,        /* EQ |    |    | x */
    TCG_COND_GE,     TCG_COND_GE,        /* EQ |    | GT | x */
    TCG_COND_LE,     TCG_COND_LE,        /* EQ | LT |    | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | LT | GT | x */
};

/* Table of mask values to comparison codes, given a logic op as input.
   For such, only CC=0 and CC=1 should be possible. */
static const TCGCond nz_cond[16] = {
    TCG_COND_NEVER, TCG_COND_NEVER,      /*    |    | x | x */
    TCG_COND_NEVER, TCG_COND_NEVER,
    TCG_COND_NE, TCG_COND_NE,            /*    | NE | x | x */
    TCG_COND_NE, TCG_COND_NE,
    TCG_COND_EQ, TCG_COND_EQ,            /* EQ |    | x | x */
    TCG_COND_EQ, TCG_COND_EQ,
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,    /* EQ | NE | x | x */
    TCG_COND_ALWAYS, TCG_COND_ALWAYS,
};
672 | |
/* Interpret MASK in terms of S->CC_OP, and fill in C with all the
   details required to generate a TCG comparison.  Where the live cc
   data permits, the comparison is done inline on cc_src/cc_dst/cc_vr;
   otherwise the cc value is materialized first and tested against the
   mask (bit 8 selects cc=0, 4 -> cc=1, 2 -> cc=2, 1 -> cc=3). */
static void disas_jcc(DisasContext *s, DisasCompare *c, uint32_t mask)
{
    TCGCond cond;
    enum cc_op old_cc_op = s->cc_op;

    /* Branch-always / branch-never need no real comparison. */
    if (mask == 15 || mask == 0) {
        c->cond = (mask ? TCG_COND_ALWAYS : TCG_COND_NEVER);
        c->u.s32.a = cc_op;
        c->u.s32.b = cc_op;
        c->g1 = c->g2 = true;
        c->is_64 = false;
        return;
    }

    /* Find the TCG condition for the mask + cc op. */
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
    case CC_OP_LTGT0_64:
    case CC_OP_LTGT_32:
    case CC_OP_LTGT_64:
        cond = ltgt_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_LTUGTU_32:
    case CC_OP_LTUGTU_64:
        cond = tcg_unsigned_cond(ltgt_cond[mask]);
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_NZ:
        cond = nz_cond[mask];
        if (cond == TCG_COND_NEVER) {
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ICM:
        switch (mask) {
        case 8:
            cond = TCG_COND_EQ;
            break;
        case 4 | 2 | 1:
        case 4 | 2:
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_FLOGR:
        switch (mask & 0xa) {
        case 8: /* src == 0 -> no one bit found */
            cond = TCG_COND_EQ;
            break;
        case 2: /* src != 0 -> one bit found */
            cond = TCG_COND_NE;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_ADDU_32:
    case CC_OP_ADDU_64:
        switch (mask) {
        case 8 | 2: /* vr == 0 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* vr != 0 */
            cond = TCG_COND_NE;
            break;
        case 8 | 4: /* no carry -> vr >= src */
            cond = TCG_COND_GEU;
            break;
        case 2 | 1: /* carry -> vr < src */
            cond = TCG_COND_LTU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    case CC_OP_SUBU_32:
    case CC_OP_SUBU_64:
        /* Note that CC=0 is impossible; treat it as dont-care. */
        switch (mask & 7) {
        case 2: /* zero -> op1 == op2 */
            cond = TCG_COND_EQ;
            break;
        case 4 | 1: /* !zero -> op1 != op2 */
            cond = TCG_COND_NE;
            break;
        case 4: /* borrow (!carry) -> op1 < op2 */
            cond = TCG_COND_LTU;
            break;
        case 2 | 1: /* !borrow (carry) -> op1 >= op2 */
            cond = TCG_COND_GEU;
            break;
        default:
            goto do_dynamic;
        }
        account_inline_branch(s, old_cc_op);
        break;

    default:
    do_dynamic:
        /* Calculate cc value. */
        gen_op_calc_cc(s);
        /* FALLTHRU */

    case CC_OP_STATIC:
        /* Jump based on CC.  We'll load up the real cond below;
           the assignment here merely avoids a compiler warning. */
        account_noninline_branch(s, old_cc_op);
        old_cc_op = CC_OP_STATIC;
        cond = TCG_COND_NEVER;
        break;
    }

    /* Load up the arguments of the comparison. */
    c->is_64 = true;
    c->g1 = c->g2 = false;
    switch (old_cc_op) {
    case CC_OP_LTGT0_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_dst);
        c->u.s32.b = tcg_const_i32(0);
        break;
    case CC_OP_LTGT_32:
    case CC_OP_LTUGTU_32:
    case CC_OP_SUBU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_src);
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.b, cc_dst);
        break;

    case CC_OP_LTGT0_64:
    case CC_OP_NZ:
    case CC_OP_FLOGR:
        c->u.s64.a = cc_dst;
        c->u.s64.b = tcg_const_i64(0);
        c->g1 = true;
        break;
    case CC_OP_LTGT_64:
    case CC_OP_LTUGTU_64:
    case CC_OP_SUBU_64:
        c->u.s64.a = cc_src;
        c->u.s64.b = cc_dst;
        c->g1 = c->g2 = true;
        break;

    case CC_OP_TM_32:
    case CC_OP_TM_64:
    case CC_OP_ICM:
        c->u.s64.a = tcg_temp_new_i64();
        c->u.s64.b = tcg_const_i64(0);
        tcg_gen_and_i64(c->u.s64.a, cc_src, cc_dst);
        break;

    case CC_OP_ADDU_32:
        c->is_64 = false;
        c->u.s32.a = tcg_temp_new_i32();
        c->u.s32.b = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(c->u.s32.a, cc_vr);
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            tcg_gen_movi_i32(c->u.s32.b, 0);
        } else {
            tcg_gen_extrl_i64_i32(c->u.s32.b, cc_src);
        }
        break;

    case CC_OP_ADDU_64:
        c->u.s64.a = cc_vr;
        c->g1 = true;
        if (cond == TCG_COND_EQ || cond == TCG_COND_NE) {
            c->u.s64.b = tcg_const_i64(0);
        } else {
            c->u.s64.b = cc_src;
            c->g2 = true;
        }
        break;

    case CC_OP_STATIC:
        /* The cc value is already materialized; recognize the common
           masks that reduce to a single comparison against cc_op. */
        c->is_64 = false;
        c->u.s32.a = cc_op;
        c->g1 = true;
        switch (mask) {
        case 0x8 | 0x4 | 0x2: /* cc != 3 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(3);
            break;
        case 0x8 | 0x4 | 0x1: /* cc != 2 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8 | 0x2 | 0x1: /* cc != 1 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x8 | 0x2: /* cc == 0 || cc == 2 => (cc & 1) == 0 */
            cond = TCG_COND_EQ;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x8 | 0x4: /* cc < 2 */
            cond = TCG_COND_LTU;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x8: /* cc == 0 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x2 | 0x1: /* cc != 0 */
            cond = TCG_COND_NE;
            c->u.s32.b = tcg_const_i32(0);
            break;
        case 0x4 | 0x1: /* cc == 1 || cc == 3 => (cc & 1) != 0 */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_temp_new_i32();
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_andi_i32(c->u.s32.a, cc_op, 1);
            break;
        case 0x4: /* cc == 1 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2 | 0x1: /* cc > 1 */
            cond = TCG_COND_GTU;
            c->u.s32.b = tcg_const_i32(1);
            break;
        case 0x2: /* cc == 2 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(2);
            break;
        case 0x1: /* cc == 3 */
            cond = TCG_COND_EQ;
            c->u.s32.b = tcg_const_i32(3);
            break;
        default:
            /* CC is masked by something else: (8 >> cc) & mask. */
            cond = TCG_COND_NE;
            c->g1 = false;
            c->u.s32.a = tcg_const_i32(8);
            c->u.s32.b = tcg_const_i32(0);
            tcg_gen_shr_i32(c->u.s32.a, c->u.s32.a, cc_op);
            tcg_gen_andi_i32(c->u.s32.a, c->u.s32.a, mask);
            break;
        }
        break;

    default:
        abort();
    }
    c->cond = cond;
}
963 | |
964 | static void free_compare(DisasCompare *c) |
965 | { |
966 | if (!c->g1) { |
967 | if (c->is_64) { |
968 | tcg_temp_free_i64(c->u.s64.a); |
969 | } else { |
970 | tcg_temp_free_i32(c->u.s32.a); |
971 | } |
972 | } |
973 | if (!c->g2) { |
974 | if (c->is_64) { |
975 | tcg_temp_free_i64(c->u.s64.b); |
976 | } else { |
977 | tcg_temp_free_i32(c->u.s32.b); |
978 | } |
979 | } |
980 | } |
981 | |
982 | /* ====================================================================== */ |
983 | /* Define the insn format enumeration. */ |
984 | #define F0(N) FMT_##N, |
985 | #define F1(N, X1) F0(N) |
986 | #define F2(N, X1, X2) F0(N) |
987 | #define F3(N, X1, X2, X3) F0(N) |
988 | #define F4(N, X1, X2, X3, X4) F0(N) |
989 | #define F5(N, X1, X2, X3, X4, X5) F0(N) |
990 | #define F6(N, X1, X2, X3, X4, X5, X6) F0(N) |
991 | |
992 | typedef enum { |
993 | #include "insn-format.def" |
994 | } DisasFormat; |
995 | |
996 | #undef F0 |
997 | #undef F1 |
998 | #undef F2 |
999 | #undef F3 |
1000 | #undef F4 |
1001 | #undef F5 |
1002 | #undef F6 |
1003 | |
/* Define a structure to hold the decoded fields.  We'll store each inside
   an array indexed by an enum.  In order to conserve memory, we'll arrange
   for fields that do not exist at the same time to overlap, thus the "C"
   for compact.  For checking purposes there is an "O" for original index
   as well that will be applied to availability bitmaps.  */

/* One enumerator per distinct field name used in insn-format.def;
   used as bit positions in DisasFields.presentO. */
enum DisasFieldIndexO {
    FLD_O_r1,
    FLD_O_r2,
    FLD_O_r3,
    FLD_O_m1,
    FLD_O_m3,
    FLD_O_m4,
    FLD_O_m5,
    FLD_O_m6,
    FLD_O_b1,
    FLD_O_b2,
    FLD_O_b4,
    FLD_O_d1,
    FLD_O_d2,
    FLD_O_d4,
    FLD_O_x2,
    FLD_O_l1,
    FLD_O_l2,
    FLD_O_i1,
    FLD_O_i2,
    FLD_O_i3,
    FLD_O_i4,
    FLD_O_i5,
    FLD_O_v1,
    FLD_O_v2,
    FLD_O_v3,
    FLD_O_v4,
};
1038 | |
/* Compact storage slots.  Fields sharing a slot number never occur
   together in one instruction format, so seven int slots suffice. */
enum DisasFieldIndexC {
    FLD_C_r1 = 0,
    FLD_C_m1 = 0,
    FLD_C_b1 = 0,
    FLD_C_i1 = 0,
    FLD_C_v1 = 0,

    FLD_C_r2 = 1,
    FLD_C_b2 = 1,
    FLD_C_i2 = 1,

    FLD_C_r3 = 2,
    FLD_C_m3 = 2,
    FLD_C_i3 = 2,
    FLD_C_v3 = 2,

    FLD_C_m4 = 3,
    FLD_C_b4 = 3,
    FLD_C_i4 = 3,
    FLD_C_l1 = 3,
    FLD_C_v4 = 3,

    FLD_C_i5 = 4,
    FLD_C_d1 = 4,
    FLD_C_m5 = 4,

    FLD_C_d2 = 5,
    FLD_C_m6 = 5,

    FLD_C_d4 = 6,
    FLD_C_x2 = 6,
    FLD_C_l2 = 6,
    FLD_C_v2 = 6,

    NUM_C_FIELD = 7
};
1075 | |
/* The decoded fields of one instruction. */
struct DisasFields {
    uint64_t raw_insn;      /* the raw instruction bits */
    unsigned op:8;          /* opcode -- presumably primary/extended; confirm
                               against the decoder */
    unsigned op2:8;
    unsigned presentC:16;   /* bitmap over DisasFieldIndexC slots */
    unsigned int presentO;  /* bitmap over DisasFieldIndexO fields */
    int c[NUM_C_FIELD];     /* field values, indexed by DisasFieldIndexC */
};
1084 | |
/* This is the way fields are to be accessed out of DisasFields.
   F is the bare field name (e.g. r1); the macros paste on the
   FLD_O_/FLD_C_ prefixes to locate the field. */
#define have_field(S, F)  have_field1((S), FLD_O_##F)
#define get_field(S, F)   get_field1((S), FLD_O_##F, FLD_C_##F)
1088 | |
1089 | static bool have_field1(const DisasFields *f, enum DisasFieldIndexO c) |
1090 | { |
1091 | return (f->presentO >> c) & 1; |
1092 | } |
1093 | |
1094 | static int get_field1(const DisasFields *f, enum DisasFieldIndexO o, |
1095 | enum DisasFieldIndexC c) |
1096 | { |
1097 | assert(have_field1(f, o)); |
1098 | return f->c[c]; |
1099 | } |
1100 | |
/* Describe the layout of each field in each format. */
typedef struct DisasField {
    unsigned int beg:8;      /* first bit position of the field */
    unsigned int size:8;     /* width of the field in bits */
    unsigned int type:2;     /* extraction kind: 0 used by R/M/BD/L,
                                1 by I (immediates), 2 by 20-bit long
                                displacements, 3 by V (vector regs) --
                                see the macros below */
    unsigned int indexC:6;   /* compact storage slot (FLD_C_*) */
    enum DisasFieldIndexO indexO:8;  /* original field index (FLD_O_*) */
} DisasField;

/* The set of fields to extract for one instruction format;
   unused trailing slots are zero-initialized. */
typedef struct DisasFormatInfo {
    DisasField op[NUM_C_FIELD];
} DisasFormatInfo;
1113 | |
/* Build DisasField initializers for the common field kinds:
   R = gpr number, M = mask, V = vector reg, BD = base + 12-bit disp,
   BXD = base + index + 12-bit disp, BDL/BXDL = 20-bit (long) disp
   variants, I = immediate, L = length field.  N is the operand number,
   B the starting bit, S the size in bits. */
#define R(N, B)       { B, 4, 0, FLD_C_r##N, FLD_O_r##N }
#define M(N, B)       { B, 4, 0, FLD_C_m##N, FLD_O_m##N }
#define V(N, B)       { B, 4, 3, FLD_C_v##N, FLD_O_v##N }
#define BD(N, BB, BD) { BB, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { BD, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BXD(N)        { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 12, 0, FLD_C_d##N, FLD_O_d##N }
#define BDL(N)        { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define BXDL(N)       { 16, 4, 0, FLD_C_b##N, FLD_O_b##N }, \
                      { 12, 4, 0, FLD_C_x##N, FLD_O_x##N }, \
                      { 20, 20, 2, FLD_C_d##N, FLD_O_d##N }
#define I(N, B, S)    { B, S, 1, FLD_C_i##N, FLD_O_i##N }
#define L(N, B, S)    { B, S, 0, FLD_C_l##N, FLD_O_l##N }
1129 | |
/* Expand each F<n> line of insn-format.def into one DisasFormatInfo
   entry holding up to six DisasField descriptors. */
#define F0(N)                     { { } },
#define F1(N, X1)                 { { X1 } },
#define F2(N, X1, X2)             { { X1, X2 } },
#define F3(N, X1, X2, X3)         { { X1, X2, X3 } },
#define F4(N, X1, X2, X3, X4)     { { X1, X2, X3, X4 } },
#define F5(N, X1, X2, X3, X4, X5) { { X1, X2, X3, X4, X5 } },
#define F6(N, X1, X2, X3, X4, X5, X6) \
                                  { { X1, X2, X3, X4, X5, X6 } },

static const DisasFormatInfo format_info[] = {
#include "insn-format.def"
};
1141 | |
/* The macros above exist only to expand insn-format.def;
   tear them all down again. */
#undef F0
#undef F1
#undef F2
#undef F3
#undef F4
#undef F5
#undef F6
#undef R
#undef M
#undef V
#undef BD
#undef BXD
#undef BDL
#undef BXDL
#undef I
#undef L
1158 | |
/* Generally, we'll extract operands into this structures, operate upon
   them, and store them back. See the "in1", "in2", "prep", "wout" sets
   of routines below for more details. */
typedef struct {
    /* g_* flags mark an operand that aliases a global TCG value and so
       must not be clobbered or freed (cf. the assert in op_andi). */
    bool g_out, g_out2, g_in1, g_in2;
    TCGv_i64 out, out2, in1, in2;   /* operand values / result slots */
    TCGv_i64 addr1;                 /* effective address of operand 1 */
} DisasOps;
1167 | |
/* Instructions can place constraints on their operands, raising specification
   exceptions if they are violated. To make this easy to automate, each "in1",
   "in2", "prep", "wout" helper will have a SPEC_<name> define that equals one
   of the following, or 0. To make this easy to document, we'll put the
   SPEC_<name> defines next to <name>. */

/* Bit flags; an insn's spec byte may combine several. */
#define SPEC_r1_even 1
#define SPEC_r2_even 2
#define SPEC_r3_even 4
#define SPEC_r1_f128 8
#define SPEC_r2_f128 16

/* Return values from translate_one, indicating the state of the TB. */

/* We are not using a goto_tb (for whatever reason), but have updated
   the PC (for whatever reason), so there's no need to do it again on
   exiting the TB. */
#define DISAS_PC_UPDATED        DISAS_TARGET_0

/* We have emitted one or more goto_tb. No fixup required. */
#define DISAS_GOTO_TB           DISAS_TARGET_1

/* We have updated the PC and CC values. */
#define DISAS_PC_CC_UPDATED     DISAS_TARGET_2

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the PC for the next instruction to be executed. */
#define DISAS_PC_STALE          DISAS_TARGET_3

/* We are exiting the TB to the main loop. */
#define DISAS_PC_STALE_NOCHAIN  DISAS_TARGET_4


/* Instruction flags */
#define IF_AFP1     0x0001      /* r1 is a fp reg for HFP/FPS instructions */
#define IF_AFP2     0x0002      /* r2 is a fp reg for HFP/FPS instructions */
#define IF_AFP3     0x0004      /* r3 is a fp reg for HFP/FPS instructions */
#define IF_BFP      0x0008      /* binary floating point instruction */
#define IF_DFP      0x0010      /* decimal floating point instruction */
#define IF_PRIV     0x0020      /* privileged instruction */
#define IF_VEC      0x0040      /* vector instruction */
1209 | |
/* One entry of the instruction decode table. */
struct DisasInsn {
    unsigned opc:16;    /* opcode value */
    unsigned flags:16;  /* IF_* flags above */
    DisasFormat fmt:8;  /* format, indexes format_info[] */
    unsigned fac:8;     /* facility id (availability checked elsewhere) */
    unsigned spec:8;    /* SPEC_* operand constraints */

    const char *name;

    /* Pre-process arguments before HELP_OP. */
    void (*help_in1)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_in2)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_prep)(DisasContext *, DisasFields *, DisasOps *);

    /*
     * Post-process output after HELP_OP.
     * Note that these are not called if HELP_OP returns DISAS_NORETURN.
     */
    void (*help_wout)(DisasContext *, DisasFields *, DisasOps *);
    void (*help_cout)(DisasContext *, DisasOps *);

    /* Implement the operation itself. */
    DisasJumpType (*help_op)(DisasContext *, DisasOps *);

    /* Insn-specific constant consumed by the helpers above. */
    uint64_t data;
};
1236 | |
1237 | /* ====================================================================== */ |
1238 | /* Miscellaneous helpers, used by several operations. */ |
1239 | |
1240 | static void help_l2_shift(DisasContext *s, DisasFields *f, |
1241 | DisasOps *o, int mask) |
1242 | { |
1243 | int b2 = get_field(f, b2); |
1244 | int d2 = get_field(f, d2); |
1245 | |
1246 | if (b2 == 0) { |
1247 | o->in2 = tcg_const_i64(d2 & mask); |
1248 | } else { |
1249 | o->in2 = get_address(s, 0, b2, d2); |
1250 | tcg_gen_andi_i64(o->in2, o->in2, mask); |
1251 | } |
1252 | } |
1253 | |
/*
 * Emit an unconditional direct branch to DEST.  Chains through goto_tb
 * when allowed; otherwise just stores the new PC and exits.
 */
static DisasJumpType help_goto_direct(DisasContext *s, uint64_t dest)
{
    /* Branching to the insn that follows: fall through. */
    if (dest == s->pc_tmp) {
        per_branch(s, true);
        return DISAS_NEXT;
    }
    if (use_goto_tb(s, dest)) {
        update_cc_op(s);
        per_breaking_event(s);
        tcg_gen_goto_tb(0);
        tcg_gen_movi_i64(psw_addr, dest);
        tcg_gen_exit_tb(s->base.tb, 0);
        return DISAS_GOTO_TB;
    } else {
        tcg_gen_movi_i64(psw_addr, dest);
        per_branch(s, false);
        return DISAS_PC_UPDATED;
    }
}
1273 | |
/*
 * Emit a conditional branch described by compare C.  With IS_IMM the
 * target is pc_next + 2 * IMM (relative halfword count), otherwise it
 * is the value in CDEST (NULL meaning "no branch", e.g. bcr %r0).
 * C is consumed (freed) on all paths.
 */
static DisasJumpType help_branch(DisasContext *s, DisasCompare *c,
                                 bool is_imm, int imm, TCGv_i64 cdest)
{
    DisasJumpType ret;
    uint64_t dest = s->base.pc_next + 2 * imm;
    TCGLabel *lab;

    /* Take care of the special cases first. */
    if (c->cond == TCG_COND_NEVER) {
        ret = DISAS_NEXT;
        goto egress;
    }
    if (is_imm) {
        if (dest == s->pc_tmp) {
            /* Branch to next. */
            per_branch(s, true);
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            ret = help_goto_direct(s, dest);
            goto egress;
        }
    } else {
        if (!cdest) {
            /* E.g. bcr %r0 -> no branch. */
            ret = DISAS_NEXT;
            goto egress;
        }
        if (c->cond == TCG_COND_ALWAYS) {
            tcg_gen_mov_i64(psw_addr, cdest);
            per_branch(s, false);
            ret = DISAS_PC_UPDATED;
            goto egress;
        }
    }

    if (use_goto_tb(s, s->pc_tmp)) {
        if (is_imm && use_goto_tb(s, dest)) {
            /* Both exits can use goto_tb. */
            update_cc_op(s);

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            /* Branch taken. */
            gen_set_label(lab);
            per_breaking_event(s);
            tcg_gen_goto_tb(1);
            tcg_gen_movi_i64(psw_addr, dest);
            tcg_gen_exit_tb(s->base.tb, 1);

            ret = DISAS_GOTO_TB;
        } else {
            /* Fallthru can use goto_tb, but taken branch cannot. */
            /* Store taken branch destination before the brcond. This
               avoids having to allocate a new local temp to hold it.
               We'll overwrite this in the not taken case anyway. */
            if (!is_imm) {
                tcg_gen_mov_i64(psw_addr, cdest);
            }

            lab = gen_new_label();
            if (c->is_64) {
                tcg_gen_brcond_i64(c->cond, c->u.s64.a, c->u.s64.b, lab);
            } else {
                tcg_gen_brcond_i32(c->cond, c->u.s32.a, c->u.s32.b, lab);
            }

            /* Branch not taken. */
            update_cc_op(s);
            tcg_gen_goto_tb(0);
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
            tcg_gen_exit_tb(s->base.tb, 0);

            gen_set_label(lab);
            if (is_imm) {
                tcg_gen_movi_i64(psw_addr, dest);
            }
            per_breaking_event(s);
            ret = DISAS_PC_UPDATED;
        }
    } else {
        /* Fallthru cannot use goto_tb. This by itself is vanishingly rare.
           Most commonly we're single-stepping or some other condition that
           disables all use of goto_tb. Just update the PC and exit. */

        TCGv_i64 next = tcg_const_i64(s->pc_tmp);
        if (is_imm) {
            cdest = tcg_const_i64(dest);
        }

        if (c->is_64) {
            /* Select taken vs not-taken destination branchlessly. */
            tcg_gen_movcond_i64(c->cond, psw_addr, c->u.s64.a, c->u.s64.b,
                                cdest, next);
            per_branch_cond(s, c->cond, c->u.s64.a, c->u.s64.b);
        } else {
            /* Widen the 32-bit comparison result to select on in 64 bits. */
            TCGv_i32 t0 = tcg_temp_new_i32();
            TCGv_i64 t1 = tcg_temp_new_i64();
            TCGv_i64 z = tcg_const_i64(0);
            tcg_gen_setcond_i32(c->cond, t0, c->u.s32.a, c->u.s32.b);
            tcg_gen_extu_i32_i64(t1, t0);
            tcg_temp_free_i32(t0);
            tcg_gen_movcond_i64(TCG_COND_NE, psw_addr, t1, z, cdest, next);
            per_branch_cond(s, TCG_COND_NE, t1, z);
            tcg_temp_free_i64(t1);
            tcg_temp_free_i64(z);
        }

        if (is_imm) {
            tcg_temp_free_i64(cdest);
        }
        tcg_temp_free_i64(next);

        ret = DISAS_PC_UPDATED;
    }

 egress:
    free_compare(c);
    return ret;
}
1404 | |
1405 | /* ====================================================================== */ |
1406 | /* The operations. These perform the bulk of the work for any insn, |
1407 | usually after the operands have been loaded and output initialized. */ |
1408 | |
/* Absolute value of a 64-bit integer operand. */
static DisasJumpType op_abs(DisasContext *s, DisasOps *o)
{
    tcg_gen_abs_i64(o->out, o->in2);
    return DISAS_NEXT;
}

/* FP absolute value: clear everything above bit 30, i.e. the sign bit
   of a 32-bit float kept in the low half of the i64 temp. */
static DisasJumpType op_absf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffull);
    return DISAS_NEXT;
}

/* FP absolute value: clear bit 63, the f64 sign bit. */
static DisasJumpType op_absf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull);
    return DISAS_NEXT;
}

/* 128-bit FP absolute value: clear the sign bit in the high half (in1),
   pass the low half (in2) through unchanged. */
static DisasJumpType op_absf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in1, 0x7fffffffffffffffull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}

/* Integer addition: out = in1 + in2. */
static DisasJumpType op_add(DisasContext *s, DisasOps *o)
{
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1439 | |
/* Add with carry: out = in1 + in2 + carry, where the carry is extracted
   from the current condition code. */
static DisasJumpType op_addc(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 carry;

    tcg_gen_add_i64(o->out, o->in1, o->in2);

    /* The carry flag is the msb of CC, therefore the branch mask that would
       create that comparison is 3. Feeding the generated comparison to
       setcond produces the carry flag that we desire. */
    disas_jcc(s, &cmp, 3);
    carry = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, carry, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        /* 32-bit comparison: compute the flag in 32 bits, then widen. */
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(carry, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_add_i64(o->out, o->out, carry);
    tcg_temp_free_i64(carry);
    return DISAS_NEXT;
}
1466 | |
/*
 * Add to storage: add in2 to the value at addr1 and store the sum back.
 * Done as a single atomic fetch-add when the STFLE 45 facility is
 * available, otherwise as separate load / add / store.  insn->data
 * carries the MemOp for the memory accesses; out receives the sum so
 * the caller can derive the CC from it.
 */
static DisasJumpType op_asi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic addition in memory. */
        tcg_gen_atomic_fetch_add_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_add_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_STFLE_45)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1487 | |
/* FP add (short format) via helper. */
static DisasJumpType op_aeb(DisasContext *s, DisasOps *o)
{
    gen_helper_aeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* FP add (long format) via helper. */
static DisasJumpType op_adb(DisasContext *s, DisasOps *o)
{
    gen_helper_adb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* 128-bit FP add via helper; the low half of the result is retrieved
   from the helper's side channel with return_low128. */
static DisasJumpType op_axb(DisasContext *s, DisasOps *o)
{
    gen_helper_axb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}

/* Bitwise AND: out = in1 & in2. */
static DisasJumpType op_and(DisasContext *s, DisasOps *o)
{
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
1512 | |
/*
 * AND an immediate into one sub-field of the operand.  insn->data packs
 * the field's shift in its low byte and its size in bits in the next
 * byte.  Bits outside the field are forced to 1 in the mask so they
 * pass through unchanged; the CC is computed from the field bits only.
 */
static DisasJumpType op_andi(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 is modified in place below, so it must not be a global. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_ori_i64(o->in2, o->in2, ~mask);
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
1529 | |
/*
 * AND to storage: AND in2 into the value at addr1 and store it back.
 * Done as a single atomic fetch-and when the interlocked-access-2
 * facility is available, otherwise as load / and / store.  insn->data
 * carries the MemOp; out receives the result for CC computation.
 */
static DisasJumpType op_ni(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_and_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_and_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
1550 | |
1551 | static DisasJumpType op_bas(DisasContext *s, DisasOps *o) |
1552 | { |
1553 | pc_to_link_info(o->out, s, s->pc_tmp); |
1554 | if (o->in2) { |
1555 | tcg_gen_mov_i64(psw_addr, o->in2); |
1556 | per_branch(s, false); |
1557 | return DISAS_PC_UPDATED; |
1558 | } else { |
1559 | return DISAS_NEXT; |
1560 | } |
1561 | } |
1562 | |
/*
 * Build the link information in o->out.  In 31- or 64-bit addressing
 * mode (FLAG_MASK_32/64) this is just the plain link address.  In
 * 24-bit mode the high byte of the low word additionally carries the
 * ILC (bits 30-31), the CC (bits 28-29) and the program mask
 * (bits 24-27, taken from psw_mask); the upper 32 bits of the register
 * are preserved.
 */
static void save_link_info(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t;

    if (s->base.tb->flags & (FLAG_MASK_32 | FLAG_MASK_64)) {
        pc_to_link_info(o->out, s, s->pc_tmp);
        return;
    }
    gen_op_calc_cc(s);
    tcg_gen_andi_i64(o->out, o->out, 0xffffffff00000000ull);
    /* ILC = instruction length in halfwords, placed at bits 30-31. */
    tcg_gen_ori_i64(o->out, o->out, ((s->ilen / 2) << 30) | s->pc_tmp);
    t = tcg_temp_new_i64();
    /* Program mask from the PSW into bits 24-27. */
    tcg_gen_shri_i64(t, psw_mask, 16);
    tcg_gen_andi_i64(t, t, 0x0f000000);
    tcg_gen_or_i64(o->out, o->out, t);
    /* Condition code into bits 28-29. */
    tcg_gen_extu_i32_i64(t, cc_op);
    tcg_gen_shli_i64(t, t, 28);
    tcg_gen_or_i64(o->out, o->out, t);
    tcg_temp_free_i64(t);
}
1583 | |
1584 | static DisasJumpType op_bal(DisasContext *s, DisasOps *o) |
1585 | { |
1586 | save_link_info(s, o); |
1587 | if (o->in2) { |
1588 | tcg_gen_mov_i64(psw_addr, o->in2); |
1589 | per_branch(s, false); |
1590 | return DISAS_PC_UPDATED; |
1591 | } else { |
1592 | return DISAS_NEXT; |
1593 | } |
1594 | } |
1595 | |
/* Branch and save with a relative immediate target (i2 halfwords). */
static DisasJumpType op_basi(DisasContext *s, DisasOps *o)
{
    pc_to_link_info(o->out, s, s->pc_tmp);
    return help_goto_direct(s, s->base.pc_next + 2 * get_field(s->fields, i2));
}
1601 | |
/*
 * Branch on condition: m1 is the 4-bit condition mask; the target is
 * either the relative immediate i2 or the address already in in2.
 * A BCR with r2 = 0 never branches but may act as a memory barrier.
 */
static DisasJumpType op_bc(DisasContext *s, DisasOps *o)
{
    int m1 = get_field(s->fields, m1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    /* BCR with R2 = 0 causes no branching */
    if (have_field(s->fields, r2) && get_field(s->fields, r2) == 0) {
        if (m1 == 14) {
            /* Perform serialization */
            /* FIXME: check for fast-BCR-serialization facility */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        if (m1 == 15) {
            /* Perform serialization */
            /* FIXME: perform checkpoint-synchronisation */
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
        }
        return DISAS_NEXT;
    }

    disas_jcc(s, &c, m1);
    return help_branch(s, &c, is_imm, imm, o->in2);
}
1627 | |
/* Branch on count (32-bit): decrement the low word of r1 and branch
   if the decremented value is non-zero. */
static DisasJumpType op_bct32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_subi_i64(t, regs[r1], 1);
    store_reg32_i64(r1, t);
    /* Compare the low 32 bits of the result against zero. */
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1651 | |
/* Branch on count (high-word variant): decrement the high word of r1
   and branch if non-zero; the target is always the immediate i2. */
static DisasJumpType op_bcth(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int imm = get_field(s->fields, i2);
    DisasCompare c;
    TCGv_i64 t;

    c.cond = TCG_COND_NE;
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    /* Extract, decrement and write back the high 32 bits of r1. */
    tcg_gen_shri_i64(t, regs[r1], 32);
    tcg_gen_subi_i64(t, t, 1);
    store_reg32h_i64(r1, t);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_const_i32(0);
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, 1, imm, o->in2);
}
1675 | |
/* Branch on count (64-bit): decrement r1 in place and branch if the
   result is non-zero.  regs[r1] is a global, hence g1 = true. */
static DisasJumpType op_bct64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = TCG_COND_NE;
    c.is_64 = true;
    c.g1 = true;
    c.g2 = false;

    tcg_gen_subi_i64(regs[r1], regs[r1], 1);
    c.u.s64.a = regs[r1];
    c.u.s64.b = tcg_const_i64(0);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1694 | |
/*
 * Branch on index (32-bit): r1 += r3, then compare the new low word of
 * r1 against the odd register of the r3 pair.  insn->data != 0 selects
 * TCG_COND_LE, otherwise TCG_COND_GT.
 */
static DisasJumpType op_bx32(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;
    TCGv_i64 t;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = false;
    c.g1 = false;
    c.g2 = false;

    t = tcg_temp_new_i64();
    tcg_gen_add_i64(t, regs[r1], regs[r3]);
    c.u.s32.a = tcg_temp_new_i32();
    c.u.s32.b = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(c.u.s32.a, t);
    /* r3 | 1: the odd register of the pair holds the comparand. */
    tcg_gen_extrl_i64_i32(c.u.s32.b, regs[r3 | 1]);
    store_reg32_i64(r1, t);
    tcg_temp_free_i64(t);

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1720 | |
/*
 * Branch on index (64-bit): r1 += r3, then compare r1 against the odd
 * register of the r3 pair.  insn->data != 0 selects TCG_COND_LE,
 * otherwise TCG_COND_GT.
 */
static DisasJumpType op_bx64(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    bool is_imm = have_field(s->fields, i2);
    int imm = is_imm ? get_field(s->fields, i2) : 0;
    DisasCompare c;

    c.cond = (s->insn->data ? TCG_COND_LE : TCG_COND_GT);
    c.is_64 = true;

    /* If the add below would clobber the comparand, copy it out first. */
    if (r1 == (r3 | 1)) {
        c.u.s64.b = load_reg(r3 | 1);
        c.g2 = false;
    } else {
        c.u.s64.b = regs[r3 | 1];
        c.g2 = true;
    }

    tcg_gen_add_i64(regs[r1], regs[r1], regs[r3]);
    c.u.s64.a = regs[r1];
    c.g1 = true;

    return help_branch(s, &c, is_imm, imm, o->in2);
}
1746 | |
/*
 * Compare and branch: compare in1 with in2 using the relation encoded
 * in m3 (forced unsigned when insn->data is set), branching to the
 * relative immediate i4 or to the b4/d4 address.
 */
static DisasJumpType op_cj(DisasContext *s, DisasOps *o)
{
    int imm, m3 = get_field(s->fields, m3);
    bool is_imm;
    DisasCompare c;

    c.cond = ltgt_cond[m3];
    if (s->insn->data) {
        c.cond = tcg_unsigned_cond(c.cond);
    }
    c.is_64 = c.g1 = c.g2 = true;
    c.u.s64.a = o->in1;
    c.u.s64.b = o->in2;

    is_imm = have_field(s->fields, i4);
    if (is_imm) {
        imm = get_field(s->fields, i4);
    } else {
        imm = 0;
        /* Indirect form: target address comes from b4/d4. */
        o->out = get_address(s, 0, get_field(s->fields, b4),
                             get_field(s->fields, d4));
    }

    return help_branch(s, &c, is_imm, imm, o->out);
}
1772 | |
/* FP compare (short format): the helper result becomes the CC. */
static DisasJumpType op_ceb(DisasContext *s, DisasOps *o)
{
    gen_helper_ceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* FP compare (long format): the helper result becomes the CC. */
static DisasJumpType op_cdb(DisasContext *s, DisasOps *o)
{
    gen_helper_cdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* 128-bit FP compare: operands are i64 pairs; helper result is the CC. */
static DisasJumpType op_cxb(DisasContext *s, DisasOps *o)
{
    gen_helper_cxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
1793 | |
1794 | static TCGv_i32 (DisasContext *s, bool m3_with_fpe, |
1795 | bool m4_with_fpe) |
1796 | { |
1797 | const bool fpe = s390_has_feat(S390_FEAT_FLOATING_POINT_EXT); |
1798 | uint8_t m3 = get_field(s->fields, m3); |
1799 | uint8_t m4 = get_field(s->fields, m4); |
1800 | |
1801 | /* m3 field was introduced with FPE */ |
1802 | if (!fpe && m3_with_fpe) { |
1803 | m3 = 0; |
1804 | } |
1805 | /* m4 field was introduced with FPE */ |
1806 | if (!fpe && m4_with_fpe) { |
1807 | m4 = 0; |
1808 | } |
1809 | |
1810 | /* Check for valid rounding modes. Mode 3 was introduced later. */ |
1811 | if (m3 == 2 || m3 > 7 || (!fpe && m3 == 3)) { |
1812 | gen_program_exception(s, PGM_SPECIFICATION); |
1813 | return NULL; |
1814 | } |
1815 | |
1816 | return tcg_const_i32(deposit32(m3, 4, 4, m4)); |
1817 | } |
1818 | |
/*
 * The following six routines wrap the float-to-integer conversion
 * helpers (names match the s390x CONVERT TO FIXED mnemonics).  m34
 * packs the rounding-mode fields; a NULL return from
 * fpinst_extract_m34 means a specification exception was already
 * raised.  The CC is derived from the source FP operand afterwards.
 */

static DisasJumpType op_cfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    /* The 128-bit source is passed as an i64 pair (in1:in2). */
    gen_helper_cfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_cgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1896 | |
/*
 * Logical (unsigned) float-to-integer conversions.  Same pattern as
 * above; here both m3 and m4 are passed through unmodified
 * (m3_with_fpe = m4_with_fpe = false).
 */

static DisasJumpType op_clfeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clfxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clfxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgeb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgeb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f32(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgdb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgdb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f64(s, o->in2);
    return DISAS_NEXT;
}

static DisasJumpType op_clgxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_clgxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    gen_set_cc_nz_f128(s, o->in1, o->in2);
    return DISAS_NEXT;
}
1974 | |
/*
 * Integer-to-float conversions (names match the s390x CONVERT FROM
 * FIXED/LOGICAL mnemonics).  No CC is set by these.  The 128-bit
 * results retrieve their low half via return_low128.
 */

static DisasJumpType op_cegb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cegb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}

static DisasJumpType op_celgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_celgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cdlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cdlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}

static DisasJumpType op_cxlgb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, false);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_cxlgb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2048 | |
/*
 * Checksum: the helper consumes the operand described by the r2/r2+1
 * register pair (address/length), returns the number of bytes it
 * processed in LEN, leaves the CC in cc_op and the accumulated result
 * via the low-128 side channel.  The register pair is then advanced by
 * the processed length.
 */
static DisasJumpType op_cksm(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    TCGv_i64 len = tcg_temp_new_i64();

    gen_helper_cksm(len, cpu_env, o->in1, o->in2, regs[r2 + 1]);
    set_cc_static(s);
    return_low128(o->out);

    tcg_gen_add_i64(regs[r2], regs[r2], len);
    tcg_gen_sub_i64(regs[r2 + 1], regs[r2 + 1], len);
    tcg_temp_free_i64(len);

    return DISAS_NEXT;
}
2064 | |
/* CLC: COMPARE LOGICAL.  The l1 field encodes length-minus-one; for
   1/2/4/8-byte operands inline two zero-extending loads and compute CC
   with an unsigned compare, otherwise call the byte-loop helper, which
   sets CC itself. */
static DisasJumpType op_clc(DisasContext *s, DisasOps *o)
{
    int l = get_field(s->fields, l1);
    TCGv_i32 vl;

    switch (l + 1) {
    case 1:
        tcg_gen_qemu_ld8u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld8u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 2:
        tcg_gen_qemu_ld16u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld16u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 4:
        tcg_gen_qemu_ld32u(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld32u(cc_dst, o->in2, get_mem_index(s));
        break;
    case 8:
        tcg_gen_qemu_ld64(cc_src, o->addr1, get_mem_index(s));
        tcg_gen_qemu_ld64(cc_dst, o->in2, get_mem_index(s));
        break;
    default:
        vl = tcg_const_i32(l);
        gen_helper_clc(cc_op, cpu_env, vl, o->addr1, o->in2);
        tcg_temp_free_i32(vl);
        set_cc_static(s);
        return DISAS_NEXT;
    }
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, cc_src, cc_dst);
    return DISAS_NEXT;
}
2097 | |
2098 | static DisasJumpType op_clcl(DisasContext *s, DisasOps *o) |
2099 | { |
2100 | int r1 = get_field(s->fields, r1); |
2101 | int r2 = get_field(s->fields, r2); |
2102 | TCGv_i32 t1, t2; |
2103 | |
2104 | /* r1 and r2 must be even. */ |
2105 | if (r1 & 1 || r2 & 1) { |
2106 | gen_program_exception(s, PGM_SPECIFICATION); |
2107 | return DISAS_NORETURN; |
2108 | } |
2109 | |
2110 | t1 = tcg_const_i32(r1); |
2111 | t2 = tcg_const_i32(r2); |
2112 | gen_helper_clcl(cc_op, cpu_env, t1, t2); |
2113 | tcg_temp_free_i32(t1); |
2114 | tcg_temp_free_i32(t2); |
2115 | set_cc_static(s); |
2116 | return DISAS_NEXT; |
2117 | } |
2118 | |
2119 | static DisasJumpType op_clcle(DisasContext *s, DisasOps *o) |
2120 | { |
2121 | int r1 = get_field(s->fields, r1); |
2122 | int r3 = get_field(s->fields, r3); |
2123 | TCGv_i32 t1, t3; |
2124 | |
2125 | /* r1 and r3 must be even. */ |
2126 | if (r1 & 1 || r3 & 1) { |
2127 | gen_program_exception(s, PGM_SPECIFICATION); |
2128 | return DISAS_NORETURN; |
2129 | } |
2130 | |
2131 | t1 = tcg_const_i32(r1); |
2132 | t3 = tcg_const_i32(r3); |
2133 | gen_helper_clcle(cc_op, cpu_env, t1, o->in2, t3); |
2134 | tcg_temp_free_i32(t1); |
2135 | tcg_temp_free_i32(t3); |
2136 | set_cc_static(s); |
2137 | return DISAS_NEXT; |
2138 | } |
2139 | |
2140 | static DisasJumpType op_clclu(DisasContext *s, DisasOps *o) |
2141 | { |
2142 | int r1 = get_field(s->fields, r1); |
2143 | int r3 = get_field(s->fields, r3); |
2144 | TCGv_i32 t1, t3; |
2145 | |
2146 | /* r1 and r3 must be even. */ |
2147 | if (r1 & 1 || r3 & 1) { |
2148 | gen_program_exception(s, PGM_SPECIFICATION); |
2149 | return DISAS_NORETURN; |
2150 | } |
2151 | |
2152 | t1 = tcg_const_i32(r1); |
2153 | t3 = tcg_const_i32(r3); |
2154 | gen_helper_clclu(cc_op, cpu_env, t1, o->in2, t3); |
2155 | tcg_temp_free_i32(t1); |
2156 | tcg_temp_free_i32(t3); |
2157 | set_cc_static(s); |
2158 | return DISAS_NEXT; |
2159 | } |
2160 | |
2161 | static DisasJumpType op_clm(DisasContext *s, DisasOps *o) |
2162 | { |
2163 | TCGv_i32 m3 = tcg_const_i32(get_field(s->fields, m3)); |
2164 | TCGv_i32 t1 = tcg_temp_new_i32(); |
2165 | tcg_gen_extrl_i64_i32(t1, o->in1); |
2166 | gen_helper_clm(cc_op, cpu_env, t1, m3, o->in2); |
2167 | set_cc_static(s); |
2168 | tcg_temp_free_i32(t1); |
2169 | tcg_temp_free_i32(m3); |
2170 | return DISAS_NEXT; |
2171 | } |
2172 | |
/* CLST: COMPARE LOGICAL STRING.  regs[0] supplies the terminator byte;
   the helper returns one updated address directly (into in1) and the
   other through return_low128 (into in2), and sets CC. */
static DisasJumpType op_clst(DisasContext *s, DisasOps *o)
{
    gen_helper_clst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
2180 | |
2181 | static DisasJumpType op_cps(DisasContext *s, DisasOps *o) |
2182 | { |
2183 | TCGv_i64 t = tcg_temp_new_i64(); |
2184 | tcg_gen_andi_i64(t, o->in1, 0x8000000000000000ull); |
2185 | tcg_gen_andi_i64(o->out, o->in2, 0x7fffffffffffffffull); |
2186 | tcg_gen_or_i64(o->out, o->out, t); |
2187 | tcg_temp_free_i64(t); |
2188 | return DISAS_NEXT; |
2189 | } |
2190 | |
/* CS/CSY/CSG: COMPARE AND SWAP.  insn->data supplies the access size;
   the cmpxchg is aligned and atomic.  CC is 0 when the memory value
   matched the expected value, 1 otherwise. */
static DisasJumpType op_cs(DisasContext *s, DisasOps *o)
{
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    TCGv_i64 addr, cc;

    /* Note that in1 = R3 (new value) and
       in2 = (zero-extended) R1 (expected value).  */

    addr = get_address(s, 0, b2, d2);
    tcg_gen_atomic_cmpxchg_i64(o->out, addr, o->in2, o->in1,
                               get_mem_index(s), s->insn->data | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  Note that this setcond
       produces the output CC value, thus the NE sense of the test.  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in2, o->out);
    tcg_gen_extrl_i64_i32(cc_op, cc);
    tcg_temp_free_i64(cc);
    set_cc_static(s);

    return DISAS_NEXT;
}
2215 | |
/* CDSG: COMPARE DOUBLE AND SWAP (128-bit).  Done entirely in a helper;
   the parallel variant requires a host 128-bit cmpxchg, otherwise we
   bail out with exit_atomic so the operation can be redone outside of
   a parallel context. */
static DisasJumpType op_cdsg(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int d2 = get_field(s->fields, d2);
    int b2 = get_field(s->fields, b2);
    DisasJumpType ret = DISAS_NEXT;
    TCGv_i64 addr;
    TCGv_i32 t_r1, t_r3;

    /* Note that R1:R1+1 = expected value and R3:R3+1 = new value.  */
    addr = get_address(s, 0, b2, d2);
    t_r1 = tcg_const_i32(r1);
    t_r3 = tcg_const_i32(r3);
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_cdsg(cpu_env, addr, t_r1, t_r3);
    } else if (HAVE_CMPXCHG128) {
        gen_helper_cdsg_parallel(cpu_env, addr, t_r1, t_r3);
    } else {
        gen_helper_exit_atomic(cpu_env);
        ret = DISAS_NORETURN;
    }
    tcg_temp_free_i64(addr);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r3);

    set_cc_static(s);
    return ret;
}
2245 | |
2246 | static DisasJumpType op_csst(DisasContext *s, DisasOps *o) |
2247 | { |
2248 | int r3 = get_field(s->fields, r3); |
2249 | TCGv_i32 t_r3 = tcg_const_i32(r3); |
2250 | |
2251 | if (tb_cflags(s->base.tb) & CF_PARALLEL) { |
2252 | gen_helper_csst_parallel(cc_op, cpu_env, t_r3, o->addr1, o->in2); |
2253 | } else { |
2254 | gen_helper_csst(cc_op, cpu_env, t_r3, o->addr1, o->in2); |
2255 | } |
2256 | tcg_temp_free_i32(t_r3); |
2257 | |
2258 | set_cc_static(s); |
2259 | return DISAS_NEXT; |
2260 | } |
2261 | |
2262 | #ifndef CONFIG_USER_ONLY |
/* CSP/CSPG: COMPARE AND SWAP AND PURGE.  Atomic cmpxchg at the
   (size-aligned) address from R2; on success with the low bit of R2
   set, the TLB is purged on all cpus. */
static DisasJumpType op_csp(DisasContext *s, DisasOps *o)
{
    MemOp mop = s->insn->data;
    TCGv_i64 addr, old, cc;
    TCGLabel *lab = gen_new_label();

    /* Note that in1 = R1 (zero-extended expected value),
       out = R1 (original reg), out2 = R1+1 (new value).  */

    addr = tcg_temp_new_i64();
    old = tcg_temp_new_i64();
    /* Mask the address down to the operand-size alignment. */
    tcg_gen_andi_i64(addr, o->in2, -1ULL << (mop & MO_SIZE));
    tcg_gen_atomic_cmpxchg_i64(old, addr, o->in1, o->out2,
                               get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(addr);

    /* Are the memory and expected values (un)equal?  */
    cc = tcg_temp_new_i64();
    tcg_gen_setcond_i64(TCG_COND_NE, cc, o->in1, old);
    tcg_gen_extrl_i64_i32(cc_op, cc);

    /* Write back the output now, so that it happens before the
       following branch, so that we don't need local temps.  */
    if ((mop & MO_SIZE) == MO_32) {
        tcg_gen_deposit_i64(o->out, o->out, old, 0, 32);
    } else {
        tcg_gen_mov_i64(o->out, old);
    }
    tcg_temp_free_i64(old);

    /* If the comparison was equal, and the LSB of R2 was set,
       then we need to flush the TLB (for all cpus).  */
    tcg_gen_xori_i64(cc, cc, 1);
    tcg_gen_and_i64(cc, cc, o->in2);
    tcg_gen_brcondi_i64(TCG_COND_EQ, cc, 0, lab);
    tcg_temp_free_i64(cc);

    gen_helper_purge(cpu_env);
    gen_set_label(lab);

    return DISAS_NEXT;
}
2305 | #endif |
2306 | |
2307 | static DisasJumpType op_cvd(DisasContext *s, DisasOps *o) |
2308 | { |
2309 | TCGv_i64 t1 = tcg_temp_new_i64(); |
2310 | TCGv_i32 t2 = tcg_temp_new_i32(); |
2311 | tcg_gen_extrl_i64_i32(t2, o->in1); |
2312 | gen_helper_cvd(t1, t2); |
2313 | tcg_temp_free_i32(t2); |
2314 | tcg_gen_qemu_st64(t1, o->in2, get_mem_index(s)); |
2315 | tcg_temp_free_i64(t1); |
2316 | return DISAS_NEXT; |
2317 | } |
2318 | |
2319 | static DisasJumpType op_ct(DisasContext *s, DisasOps *o) |
2320 | { |
2321 | int m3 = get_field(s->fields, m3); |
2322 | TCGLabel *lab = gen_new_label(); |
2323 | TCGCond c; |
2324 | |
2325 | c = tcg_invert_cond(ltgt_cond[m3]); |
2326 | if (s->insn->data) { |
2327 | c = tcg_unsigned_cond(c); |
2328 | } |
2329 | tcg_gen_brcond_i64(c, o->in1, o->in2, lab); |
2330 | |
2331 | /* Trap. */ |
2332 | gen_trap(s); |
2333 | |
2334 | gen_set_label(lab); |
2335 | return DISAS_NEXT; |
2336 | } |
2337 | |
/* CUxy: CONVERT UTF-8/UTF-16/UTF-32, with insn->data encoding the
   "from-to" pair (e.g. 12 = UTF-8 to UTF-16).  r1 and r2 are even/odd
   register pairs; the m3 well-formedness flag only exists with the
   ETF3 enhancement facility, so force it to 0 otherwise. */
static DisasJumpType op_cuXX(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int r1 = get_field(s->fields, r1);
    int r2 = get_field(s->fields, r2);
    TCGv_i32 tr1, tr2, chk;

    /* R1 and R2 must both be even.  */
    if ((r1 | r2) & 1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (!s390_has_feat(S390_FEAT_ETF3_ENH)) {
        m3 = 0;
    }

    tr1 = tcg_const_i32(r1);
    tr2 = tcg_const_i32(r2);
    chk = tcg_const_i32(m3);

    switch (s->insn->data) {
    case 12:
        gen_helper_cu12(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 14:
        gen_helper_cu14(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 21:
        gen_helper_cu21(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 24:
        gen_helper_cu24(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 41:
        gen_helper_cu41(cc_op, cpu_env, tr1, tr2, chk);
        break;
    case 42:
        gen_helper_cu42(cc_op, cpu_env, tr1, tr2, chk);
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_i32(tr1);
    tcg_temp_free_i32(tr2);
    tcg_temp_free_i32(chk);
    set_cc_static(s);
    return DISAS_NEXT;
}
2387 | |
2388 | #ifndef CONFIG_USER_ONLY |
2389 | static DisasJumpType op_diag(DisasContext *s, DisasOps *o) |
2390 | { |
2391 | TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); |
2392 | TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); |
2393 | TCGv_i32 func_code = tcg_const_i32(get_field(s->fields, i2)); |
2394 | |
2395 | gen_helper_diag(cpu_env, r1, r3, func_code); |
2396 | |
2397 | tcg_temp_free_i32(func_code); |
2398 | tcg_temp_free_i32(r3); |
2399 | tcg_temp_free_i32(r1); |
2400 | return DISAS_NEXT; |
2401 | } |
2402 | #endif |
2403 | |
/* DIVIDE, 32-bit signed: the helper produces a two-part result — one
   half directly into out2, the other via return_low128 into out. */
static DisasJumpType op_divs32(DisasContext *s, DisasOps *o)
{
    gen_helper_divs32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2410 | |
/* DIVIDE LOGICAL, 32-bit unsigned: same result plumbing as op_divs32. */
static DisasJumpType op_divu32(DisasContext *s, DisasOps *o)
{
    gen_helper_divu32(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2417 | |
/* DIVIDE, 64-bit signed: two-part result via out2 and return_low128. */
static DisasJumpType op_divs64(DisasContext *s, DisasOps *o)
{
    gen_helper_divs64(o->out2, cpu_env, o->in1, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2424 | |
/* DIVIDE LOGICAL, 64-bit unsigned: the 128-bit dividend is passed as
   the out/out2 pair; the divisor is in2. */
static DisasJumpType op_divu64(DisasContext *s, DisasOps *o)
{
    gen_helper_divu64(o->out2, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out);
    return DISAS_NEXT;
}
2431 | |
/* DEB: DIVIDE, short BFP, entirely in the helper. */
static DisasJumpType op_deb(DisasContext *s, DisasOps *o)
{
    gen_helper_deb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2437 | |
/* DDB: DIVIDE, long BFP, entirely in the helper. */
static DisasJumpType op_ddb(DisasContext *s, DisasOps *o)
{
    gen_helper_ddb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
2443 | |
/* DXB: DIVIDE, extended BFP.  The 128-bit operands are passed as the
   out/out2 and in1/in2 pairs; the low result half comes back through
   return_low128. */
static DisasJumpType op_dxb(DisasContext *s, DisasOps *o)
{
    gen_helper_dxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2450 | |
/* EAR: EXTRACT ACCESS REGISTER — read access register r2 into r1. */
static DisasJumpType op_ear(DisasContext *s, DisasOps *o)
{
    int r2 = get_field(s->fields, r2);
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, aregs[r2]));
    return DISAS_NEXT;
}
2457 | |
/* ECAG: EXTRACT CACHE ATTRIBUTE — we model no cache topology, so
   always report -1. */
static DisasJumpType op_ecag(DisasContext *s, DisasOps *o)
{
    /* No cache information provided.  */
    tcg_gen_movi_i64(o->out, -1);
    return DISAS_NEXT;
}
2464 | |
/* EFPC: EXTRACT FPC — copy the floating-point control register. */
static DisasJumpType op_efpc(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, fpc));
    return DISAS_NEXT;
}
2470 | |
2471 | static DisasJumpType op_epsw(DisasContext *s, DisasOps *o) |
2472 | { |
2473 | int r1 = get_field(s->fields, r1); |
2474 | int r2 = get_field(s->fields, r2); |
2475 | TCGv_i64 t = tcg_temp_new_i64(); |
2476 | |
2477 | /* Note the "subsequently" in the PoO, which implies a defined result |
2478 | if r1 == r2. Thus we cannot defer these writes to an output hook. */ |
2479 | tcg_gen_shri_i64(t, psw_mask, 32); |
2480 | store_reg32_i64(r1, t); |
2481 | if (r2 != 0) { |
2482 | store_reg32_i64(r2, psw_mask); |
2483 | } |
2484 | |
2485 | tcg_temp_free_i64(t); |
2486 | return DISAS_NEXT; |
2487 | } |
2488 | |
/* EX/EXRL: EXECUTE.  The helper fetches and prepares the target
   instruction, modified by bits of r1 (r1 == 0 means no modification).
   PSW address and cc state are synced first; translation stops with
   DISAS_PC_CC_UPDATED so the executed instruction runs next. */
static DisasJumpType op_ex(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    TCGv_i32 ilen;
    TCGv_i64 v1;

    /* Nested EXECUTE is not allowed.  */
    if (unlikely(s->ex_value)) {
        gen_program_exception(s, PGM_EXECUTE);
        return DISAS_NORETURN;
    }

    update_psw_addr(s);
    update_cc_op(s);

    /* r1 == 0 means "no modification"; pass a zero constant instead of
       reading general register 0. */
    if (r1 == 0) {
        v1 = tcg_const_i64(0);
    } else {
        v1 = regs[r1];
    }

    ilen = tcg_const_i32(s->ilen);
    gen_helper_ex(cpu_env, ilen, v1, o->in2);
    tcg_temp_free_i32(ilen);

    /* Only free v1 when it is our own constant, not a live register. */
    if (r1 == 0) {
        tcg_temp_free_i64(v1);
    }

    return DISAS_PC_CC_UPDATED;
}
2520 | |
/* FIEB: LOAD FP INTEGER, short BFP (round to integral value); m34
   carries the rounding fields, NULL => exception already generated. */
static DisasJumpType op_fieb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fieb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2532 | |
/* FIDB: LOAD FP INTEGER, long BFP. */
static DisasJumpType op_fidb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fidb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2544 | |
/* FIXB: LOAD FP INTEGER, extended BFP; 128-bit operand in in1/in2,
   128-bit result in out and (via return_low128) out2. */
static DisasJumpType op_fixb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, false, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_fixb(o->out, cpu_env, o->in1, o->in2, m34);
    return_low128(o->out2);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2557 | |
/* FLOGR: FIND LEFTMOST ONE.  R1 gets the bit number of the leftmost
   one (or 64 for a zero input); R1+1 gets the input with that bit
   cleared. */
static DisasJumpType op_flogr(DisasContext *s, DisasOps *o)
{
    /* We'll use the original input for cc computation, since we get to
       compare that against 0, which ought to be better than comparing
       the real output against 64.  It also lets cc_dst be a convenient
       temporary during our computation.  */
    gen_op_update1_cc_i64(s, CC_OP_FLOGR, o->in2);

    /* R1 = IN ? CLZ(IN) : 64.  */
    tcg_gen_clzi_i64(o->out, o->in2, 64);

    /* R1+1 = IN & ~(found bit).  Note that we may attempt to shift this
       value by 64, which is undefined.  But since the shift is 64 iff the
       input is zero, we still get the correct result after and'ing.  */
    tcg_gen_movi_i64(o->out2, 0x8000000000000000ull);
    tcg_gen_shr_i64(o->out2, o->out2, o->out);
    tcg_gen_andc_i64(o->out2, cc_dst, o->out2);
    return DISAS_NEXT;
}
2577 | |
2578 | static DisasJumpType op_icm(DisasContext *s, DisasOps *o) |
2579 | { |
2580 | int m3 = get_field(s->fields, m3); |
2581 | int pos, len, base = s->insn->data; |
2582 | TCGv_i64 tmp = tcg_temp_new_i64(); |
2583 | uint64_t ccm; |
2584 | |
2585 | switch (m3) { |
2586 | case 0xf: |
2587 | /* Effectively a 32-bit load. */ |
2588 | tcg_gen_qemu_ld32u(tmp, o->in2, get_mem_index(s)); |
2589 | len = 32; |
2590 | goto one_insert; |
2591 | |
2592 | case 0xc: |
2593 | case 0x6: |
2594 | case 0x3: |
2595 | /* Effectively a 16-bit load. */ |
2596 | tcg_gen_qemu_ld16u(tmp, o->in2, get_mem_index(s)); |
2597 | len = 16; |
2598 | goto one_insert; |
2599 | |
2600 | case 0x8: |
2601 | case 0x4: |
2602 | case 0x2: |
2603 | case 0x1: |
2604 | /* Effectively an 8-bit load. */ |
2605 | tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); |
2606 | len = 8; |
2607 | goto one_insert; |
2608 | |
2609 | one_insert: |
2610 | pos = base + ctz32(m3) * 8; |
2611 | tcg_gen_deposit_i64(o->out, o->out, tmp, pos, len); |
2612 | ccm = ((1ull << len) - 1) << pos; |
2613 | break; |
2614 | |
2615 | default: |
2616 | /* This is going to be a sequence of loads and inserts. */ |
2617 | pos = base + 32 - 8; |
2618 | ccm = 0; |
2619 | while (m3) { |
2620 | if (m3 & 0x8) { |
2621 | tcg_gen_qemu_ld8u(tmp, o->in2, get_mem_index(s)); |
2622 | tcg_gen_addi_i64(o->in2, o->in2, 1); |
2623 | tcg_gen_deposit_i64(o->out, o->out, tmp, pos, 8); |
2624 | ccm |= 0xff << pos; |
2625 | } |
2626 | m3 = (m3 << 1) & 0xf; |
2627 | pos -= 8; |
2628 | } |
2629 | break; |
2630 | } |
2631 | |
2632 | tcg_gen_movi_i64(tmp, ccm); |
2633 | gen_op_update2_cc_i64(s, CC_OP_ICM, tmp, o->out); |
2634 | tcg_temp_free_i64(tmp); |
2635 | return DISAS_NEXT; |
2636 | } |
2637 | |
2638 | static DisasJumpType op_insi(DisasContext *s, DisasOps *o) |
2639 | { |
2640 | int shift = s->insn->data & 0xff; |
2641 | int size = s->insn->data >> 8; |
2642 | tcg_gen_deposit_i64(o->out, o->in1, o->in2, shift, size); |
2643 | return DISAS_NEXT; |
2644 | } |
2645 | |
/* IPM: INSERT PROGRAM MASK.  Build a byte with the program mask (from
   psw_mask bits 40..43) in its low nibble and the CC in the next
   nibble, and deposit it into bits 24..31 of r1, leaving the rest of
   the register untouched. */
static DisasJumpType op_ipm(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    gen_op_calc_cc(s);
    t1 = tcg_temp_new_i64();
    tcg_gen_extract_i64(t1, psw_mask, 40, 4);
    t2 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t2, cc_op);
    tcg_gen_deposit_i64(t1, t1, t2, 4, 60);
    tcg_gen_deposit_i64(o->out, o->out, t1, 24, 8);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
2661 | |
2662 | #ifndef CONFIG_USER_ONLY |
2663 | static DisasJumpType op_idte(DisasContext *s, DisasOps *o) |
2664 | { |
2665 | TCGv_i32 m4; |
2666 | |
2667 | if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { |
2668 | m4 = tcg_const_i32(get_field(s->fields, m4)); |
2669 | } else { |
2670 | m4 = tcg_const_i32(0); |
2671 | } |
2672 | gen_helper_idte(cpu_env, o->in1, o->in2, m4); |
2673 | tcg_temp_free_i32(m4); |
2674 | return DISAS_NEXT; |
2675 | } |
2676 | |
2677 | static DisasJumpType op_ipte(DisasContext *s, DisasOps *o) |
2678 | { |
2679 | TCGv_i32 m4; |
2680 | |
2681 | if (s390_has_feat(S390_FEAT_LOCAL_TLB_CLEARING)) { |
2682 | m4 = tcg_const_i32(get_field(s->fields, m4)); |
2683 | } else { |
2684 | m4 = tcg_const_i32(0); |
2685 | } |
2686 | gen_helper_ipte(cpu_env, o->in1, o->in2, m4); |
2687 | tcg_temp_free_i32(m4); |
2688 | return DISAS_NEXT; |
2689 | } |
2690 | |
/* ISKE: INSERT STORAGE KEY EXTENDED, entirely in the helper. */
static DisasJumpType op_iske(DisasContext *s, DisasOps *o)
{
    gen_helper_iske(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2696 | #endif |
2697 | |
/* Message-security-assist instructions (KM, KMC, KIMD, PPNO, ...).
   insn->data identifies the facility type; the cascading fallthroughs
   apply progressively fewer even/nonzero register-pair specification
   checks depending on which operands the given type actually uses.
   The common helper then dispatches on the type and sets CC. */
static DisasJumpType op_msa(DisasContext *s, DisasOps *o)
{
    int r1 = have_field(s->fields, r1) ? get_field(s->fields, r1) : 0;
    int r2 = have_field(s->fields, r2) ? get_field(s->fields, r2) : 0;
    int r3 = have_field(s->fields, r3) ? get_field(s->fields, r3) : 0;
    TCGv_i32 t_r1, t_r2, t_r3, type;

    switch (s->insn->data) {
    case S390_FEAT_TYPE_KMCTR:
        if (r3 & 1 || !r3) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PPNO:
    case S390_FEAT_TYPE_KMF:
    case S390_FEAT_TYPE_KMC:
    case S390_FEAT_TYPE_KMO:
    case S390_FEAT_TYPE_KM:
        if (r1 & 1 || !r1) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_KMAC:
    case S390_FEAT_TYPE_KIMD:
    case S390_FEAT_TYPE_KLMD:
        if (r2 & 1 || !r2) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
        /* FALL THROUGH */
    case S390_FEAT_TYPE_PCKMO:
    case S390_FEAT_TYPE_PCC:
        break;
    default:
        g_assert_not_reached();
    };

    t_r1 = tcg_const_i32(r1);
    t_r2 = tcg_const_i32(r2);
    t_r3 = tcg_const_i32(r3);
    type = tcg_const_i32(s->insn->data);
    gen_helper_msa(cc_op, cpu_env, t_r1, t_r2, t_r3, type);
    set_cc_static(s);
    tcg_temp_free_i32(t_r1);
    tcg_temp_free_i32(t_r2);
    tcg_temp_free_i32(t_r3);
    tcg_temp_free_i32(type);
    return DISAS_NEXT;
}
2749 | |
/* KEB: COMPARE AND SIGNAL, short BFP; the helper sets CC. */
static DisasJumpType op_keb(DisasContext *s, DisasOps *o)
{
    gen_helper_keb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2756 | |
/* KDB: COMPARE AND SIGNAL, long BFP; the helper sets CC. */
static DisasJumpType op_kdb(DisasContext *s, DisasOps *o)
{
    gen_helper_kdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2763 | |
/* KXB: COMPARE AND SIGNAL, extended BFP; 128-bit operands are passed
   as the out/out2 and in1/in2 pairs. */
static DisasJumpType op_kxb(DisasContext *s, DisasOps *o)
{
    gen_helper_kxb(cc_op, cpu_env, o->out, o->out2, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
2770 | |
static DisasJumpType op_laa(DisasContext *s, DisasOps *o)
{
    /* LOAD AND ADD: the architected output is the ORIGINAL value in
       memory, which the atomic fetch-add leaves in o->in2.  */
    tcg_gen_atomic_fetch_add_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the addition for setting CC.  */
    tcg_gen_add_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2781 | |
static DisasJumpType op_lan(DisasContext *s, DisasOps *o)
{
    /* LOAD AND AND: the architected output is the ORIGINAL value in
       memory, which the atomic fetch-and leaves in o->in2.  */
    tcg_gen_atomic_fetch_and_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_and_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2792 | |
static DisasJumpType op_lao(DisasContext *s, DisasOps *o)
{
    /* LOAD AND OR: the architected output is the ORIGINAL value in
       memory, which the atomic fetch-or leaves in o->in2.  */
    tcg_gen_atomic_fetch_or_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2803 | |
static DisasJumpType op_lax(DisasContext *s, DisasOps *o)
{
    /* LOAD AND EXCLUSIVE OR: the architected output is the ORIGINAL
       value in memory, which the atomic fetch-xor leaves in o->in2.  */
    tcg_gen_atomic_fetch_xor_i64(o->in2, o->in2, o->in1, get_mem_index(s),
                                 s->insn->data | MO_ALIGN);
    /* However, we need to recompute the operation for setting CC.  */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
2814 | |
/* LDEB: LOAD LENGTHENED, short BFP -> long BFP. */
static DisasJumpType op_ldeb(DisasContext *s, DisasOps *o)
{
    gen_helper_ldeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
2820 | |
/* LEDB: LOAD ROUNDED, long BFP -> short BFP, with m34 rounding fields. */
static DisasJumpType op_ledb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ledb(o->out, cpu_env, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2832 | |
/* LDXB: LOAD ROUNDED, extended BFP -> long BFP; the 128-bit source is
   the in1/in2 pair. */
static DisasJumpType op_ldxb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_ldxb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2844 | |
/* LEXB: LOAD ROUNDED, extended BFP -> short BFP. */
static DisasJumpType op_lexb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 m34 = fpinst_extract_m34(s, true, true);

    if (!m34) {
        return DISAS_NORETURN;
    }
    gen_helper_lexb(o->out, cpu_env, o->in1, o->in2, m34);
    tcg_temp_free_i32(m34);
    return DISAS_NEXT;
}
2856 | |
/* LXDB: LOAD LENGTHENED, long BFP -> extended BFP; 128-bit result in
   out and (via return_low128) out2. */
static DisasJumpType op_lxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxdb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2863 | |
/* LXEB: LOAD LENGTHENED, short BFP -> extended BFP. */
static DisasJumpType op_lxeb(DisasContext *s, DisasOps *o)
{
    gen_helper_lxeb(o->out, cpu_env, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
2870 | |
/* Short FP load into a 64-bit FP register: the 32-bit value occupies
   the high half, so just shift it into place. */
static DisasJumpType op_lde(DisasContext *s, DisasOps *o)
{
    tcg_gen_shli_i64(o->out, o->in2, 32);
    return DISAS_NEXT;
}
2876 | |
/* LLGT: LOAD LOGICAL THIRTY ONE BITS — keep only the low 31 bits. */
static DisasJumpType op_llgt(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    return DISAS_NEXT;
}
2882 | |
/* 8-bit sign-extending memory load. */
static DisasJumpType op_ld8s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2888 | |
/* 8-bit zero-extending memory load. */
static DisasJumpType op_ld8u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld8u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2894 | |
/* 16-bit sign-extending memory load. */
static DisasJumpType op_ld16s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2900 | |
/* 16-bit zero-extending memory load. */
static DisasJumpType op_ld16u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld16u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2906 | |
/* 32-bit sign-extending memory load. */
static DisasJumpType op_ld32s(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32s(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2912 | |
/* 32-bit zero-extending memory load. */
static DisasJumpType op_ld32u(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2918 | |
/* 64-bit memory load. */
static DisasJumpType op_ld64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
2924 | |
/* LAT: LOAD AND TRAP (32-bit) — store the loaded value into the low
   half of r1, then trap if it was zero. */
static DisasJumpType op_lat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2935 | |
/* LGAT: LOAD AND TRAP (64-bit) — load, then trap if the value is zero. */
static DisasJumpType op_lgat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld64(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2946 | |
/* LFHAT: LOAD HIGH AND TRAP — store into the high half of r1, then
   trap if the loaded value was zero. */
static DisasJumpType op_lfhat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    store_reg32h_i64(get_field(s->fields, r1), o->in2);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->in2, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2957 | |
/* LLGFAT: LOAD LOGICAL AND TRAP — zero-extending 32-bit load, then
   trap if the value is zero. */
static DisasJumpType op_llgfat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_qemu_ld32u(o->out, o->in2, get_mem_index(s));
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2968 | |
/* LLGTAT: LOAD LOGICAL THIRTY ONE BITS AND TRAP — mask to 31 bits,
   then trap if the result is zero. */
static DisasJumpType op_llgtat(DisasContext *s, DisasOps *o)
{
    TCGLabel *lab = gen_new_label();
    tcg_gen_andi_i64(o->out, o->in2, 0x7fffffff);
    /* The value is stored even in case of trap. */
    tcg_gen_brcondi_i64(TCG_COND_NE, o->out, 0, lab);
    gen_trap(s);
    gen_set_label(lab);
    return DISAS_NEXT;
}
2979 | |
/* LOAD ON CONDITION: out = condition(m3) ? in2 : in1, via movcond.
   32-bit comparisons are first materialized with a setcond and widened
   so a 64-bit movcond can be used in both cases. */
static DisasJumpType op_loc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;

    disas_jcc(s, &c, get_field(s->fields, m3));

    if (c.is_64) {
        tcg_gen_movcond_i64(c.cond, o->out, c.u.s64.a, c.u.s64.b,
                            o->in2, o->in1);
        free_compare(&c);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        TCGv_i64 t, z;

        tcg_gen_setcond_i32(c.cond, t32, c.u.s32.a, c.u.s32.b);
        free_compare(&c);

        t = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(t, t32);
        tcg_temp_free_i32(t32);

        z = tcg_const_i64(0);
        tcg_gen_movcond_i64(TCG_COND_NE, o->out, t, z, o->in2, o->in1);
        tcg_temp_free_i64(t);
        tcg_temp_free_i64(z);
    }

    return DISAS_NEXT;
}
3009 | |
3010 | #ifndef CONFIG_USER_ONLY |
3011 | static DisasJumpType op_lctl(DisasContext *s, DisasOps *o) |
3012 | { |
3013 | TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1)); |
3014 | TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3)); |
3015 | gen_helper_lctl(cpu_env, r1, o->in2, r3); |
3016 | tcg_temp_free_i32(r1); |
3017 | tcg_temp_free_i32(r3); |
3018 | /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */ |
3019 | return DISAS_PC_STALE_NOCHAIN; |
3020 | } |
3021 | |
/* 64-bit variant of op_lctl: load control registers r1..r3 via helper. */
static DisasJumpType op_lctlg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lctlg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt. */
    return DISAS_PC_STALE_NOCHAIN;
}
3032 | |
/* Load real address: translate in2 via helper; the helper sets the CC. */
static DisasJumpType op_lra(DisasContext *s, DisasOps *o)
{
    gen_helper_lra(o->out, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3039 | |
/* Load program parameter: store in2 into the CPU state's pp field. */
static DisasJumpType op_lpp(DisasContext *s, DisasOps *o)
{
    tcg_gen_st_i64(o->in2, cpu_env, offsetof(CPUS390XState, pp));
    return DISAS_NEXT;
}
3045 | |
/*
 * Load an 8-byte PSW from memory: a 32-bit mask (shifted into the
 * 64-bit PSW mask layout) followed by a 32-bit address.  The helper
 * installs the new PSW, so the TB does not continue.
 */
static DisasJumpType op_lpsw(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The first word must be doubleword-aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEUL | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 4);
    tcg_gen_qemu_ld32u(t2, o->in2, get_mem_index(s));
    /* Convert the 32-bit PSW_MASK into the 64-bit PSW_MASK. */
    tcg_gen_shli_i64(t1, t1, 32);
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3065 | |
/*
 * Load an extended (16-byte) PSW from memory: 64-bit mask followed by
 * 64-bit address.  The helper installs the new PSW, so the TB ends.
 */
static DisasJumpType op_lpswe(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t1, t2;

    per_breaking_event(s);

    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    /* The mask doubleword must be doubleword-aligned. */
    tcg_gen_qemu_ld_i64(t1, o->in2, get_mem_index(s),
                        MO_TEQ | MO_ALIGN_8);
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_ld64(t2, o->in2, get_mem_index(s));
    gen_helper_load_psw(cpu_env, t1, t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NORETURN;
}
3083 | #endif |
3084 | |
/* Load access registers r1..r3 from memory at in2, via helper. */
static DisasJumpType op_lam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_lam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
3094 | |
3095 | static DisasJumpType op_lm32(DisasContext *s, DisasOps *o) |
3096 | { |
3097 | int r1 = get_field(s->fields, r1); |
3098 | int r3 = get_field(s->fields, r3); |
3099 | TCGv_i64 t1, t2; |
3100 | |
3101 | /* Only one register to read. */ |
3102 | t1 = tcg_temp_new_i64(); |
3103 | if (unlikely(r1 == r3)) { |
3104 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
3105 | store_reg32_i64(r1, t1); |
3106 | tcg_temp_free(t1); |
3107 | return DISAS_NEXT; |
3108 | } |
3109 | |
3110 | /* First load the values of the first and last registers to trigger |
3111 | possible page faults. */ |
3112 | t2 = tcg_temp_new_i64(); |
3113 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
3114 | tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); |
3115 | tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); |
3116 | store_reg32_i64(r1, t1); |
3117 | store_reg32_i64(r3, t2); |
3118 | |
3119 | /* Only two registers to read. */ |
3120 | if (((r1 + 1) & 15) == r3) { |
3121 | tcg_temp_free(t2); |
3122 | tcg_temp_free(t1); |
3123 | return DISAS_NEXT; |
3124 | } |
3125 | |
3126 | /* Then load the remaining registers. Page fault can't occur. */ |
3127 | r3 = (r3 - 1) & 15; |
3128 | tcg_gen_movi_i64(t2, 4); |
3129 | while (r1 != r3) { |
3130 | r1 = (r1 + 1) & 15; |
3131 | tcg_gen_add_i64(o->in2, o->in2, t2); |
3132 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
3133 | store_reg32_i64(r1, t1); |
3134 | } |
3135 | tcg_temp_free(t2); |
3136 | tcg_temp_free(t1); |
3137 | |
3138 | return DISAS_NEXT; |
3139 | } |
3140 | |
3141 | static DisasJumpType op_lmh(DisasContext *s, DisasOps *o) |
3142 | { |
3143 | int r1 = get_field(s->fields, r1); |
3144 | int r3 = get_field(s->fields, r3); |
3145 | TCGv_i64 t1, t2; |
3146 | |
3147 | /* Only one register to read. */ |
3148 | t1 = tcg_temp_new_i64(); |
3149 | if (unlikely(r1 == r3)) { |
3150 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
3151 | store_reg32h_i64(r1, t1); |
3152 | tcg_temp_free(t1); |
3153 | return DISAS_NEXT; |
3154 | } |
3155 | |
3156 | /* First load the values of the first and last registers to trigger |
3157 | possible page faults. */ |
3158 | t2 = tcg_temp_new_i64(); |
3159 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
3160 | tcg_gen_addi_i64(t2, o->in2, 4 * ((r3 - r1) & 15)); |
3161 | tcg_gen_qemu_ld32u(t2, t2, get_mem_index(s)); |
3162 | store_reg32h_i64(r1, t1); |
3163 | store_reg32h_i64(r3, t2); |
3164 | |
3165 | /* Only two registers to read. */ |
3166 | if (((r1 + 1) & 15) == r3) { |
3167 | tcg_temp_free(t2); |
3168 | tcg_temp_free(t1); |
3169 | return DISAS_NEXT; |
3170 | } |
3171 | |
3172 | /* Then load the remaining registers. Page fault can't occur. */ |
3173 | r3 = (r3 - 1) & 15; |
3174 | tcg_gen_movi_i64(t2, 4); |
3175 | while (r1 != r3) { |
3176 | r1 = (r1 + 1) & 15; |
3177 | tcg_gen_add_i64(o->in2, o->in2, t2); |
3178 | tcg_gen_qemu_ld32u(t1, o->in2, get_mem_index(s)); |
3179 | store_reg32h_i64(r1, t1); |
3180 | } |
3181 | tcg_temp_free(t2); |
3182 | tcg_temp_free(t1); |
3183 | |
3184 | return DISAS_NEXT; |
3185 | } |
3186 | |
3187 | static DisasJumpType op_lm64(DisasContext *s, DisasOps *o) |
3188 | { |
3189 | int r1 = get_field(s->fields, r1); |
3190 | int r3 = get_field(s->fields, r3); |
3191 | TCGv_i64 t1, t2; |
3192 | |
3193 | /* Only one register to read. */ |
3194 | if (unlikely(r1 == r3)) { |
3195 | tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); |
3196 | return DISAS_NEXT; |
3197 | } |
3198 | |
3199 | /* First load the values of the first and last registers to trigger |
3200 | possible page faults. */ |
3201 | t1 = tcg_temp_new_i64(); |
3202 | t2 = tcg_temp_new_i64(); |
3203 | tcg_gen_qemu_ld64(t1, o->in2, get_mem_index(s)); |
3204 | tcg_gen_addi_i64(t2, o->in2, 8 * ((r3 - r1) & 15)); |
3205 | tcg_gen_qemu_ld64(regs[r3], t2, get_mem_index(s)); |
3206 | tcg_gen_mov_i64(regs[r1], t1); |
3207 | tcg_temp_free(t2); |
3208 | |
3209 | /* Only two registers to read. */ |
3210 | if (((r1 + 1) & 15) == r3) { |
3211 | tcg_temp_free(t1); |
3212 | return DISAS_NEXT; |
3213 | } |
3214 | |
3215 | /* Then load the remaining registers. Page fault can't occur. */ |
3216 | r3 = (r3 - 1) & 15; |
3217 | tcg_gen_movi_i64(t1, 8); |
3218 | while (r1 != r3) { |
3219 | r1 = (r1 + 1) & 15; |
3220 | tcg_gen_add_i64(o->in2, o->in2, t1); |
3221 | tcg_gen_qemu_ld64(regs[r1], o->in2, get_mem_index(s)); |
3222 | } |
3223 | tcg_temp_free(t1); |
3224 | |
3225 | return DISAS_NEXT; |
3226 | } |
3227 | |
/*
 * Load pair disjoint: load two operands from two distinct addresses.
 * In a parallel context the pair of loads cannot be made interlocked,
 * so stop the world and single-step; in a serial context perform both
 * aligned loads and report CC 0 (interlocked fetch performed).
 */
static DisasJumpType op_lpd(DisasContext *s, DisasOps *o)
{
    TCGv_i64 a1, a2;
    MemOp mop = s->insn->data;

    /* In a parallel context, stop the world and single step. */
    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        update_psw_addr(s);
        update_cc_op(s);
        gen_exception(EXCP_ATOMIC);
        return DISAS_NORETURN;
    }

    /* In a serial context, perform the two loads ... */
    a1 = get_address(s, 0, get_field(s->fields, b1), get_field(s->fields, d1));
    a2 = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    tcg_gen_qemu_ld_i64(o->out, a1, get_mem_index(s), mop | MO_ALIGN);
    tcg_gen_qemu_ld_i64(o->out2, a2, get_mem_index(s), mop | MO_ALIGN);
    tcg_temp_free_i64(a1);
    tcg_temp_free_i64(a2);

    /* ... and indicate that we performed them while interlocked. */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
3253 | |
/*
 * Load pair from quadword (16 bytes into out/out2).  Serial contexts
 * use the plain helper; parallel contexts need a 16-byte atomic load,
 * falling back to the exit_atomic slow path when the host lacks one.
 */
static DisasJumpType op_lpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_lpq(o->out, cpu_env, o->in2);
    } else if (HAVE_ATOMIC128) {
        gen_helper_lpq_parallel(o->out, cpu_env, o->in2);
    } else {
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return_low128(o->out2);
    return DISAS_NEXT;
}
3267 | |
3268 | #ifndef CONFIG_USER_ONLY |
/* Load using real address (word), via helper. */
static DisasJumpType op_lura(DisasContext *s, DisasOps *o)
{
    gen_helper_lura(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3274 | |
/* Load using real address (doubleword), via helper. */
static DisasJumpType op_lurag(DisasContext *s, DisasOps *o)
{
    gen_helper_lurag(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
3280 | #endif |
3281 | |
/* Load and zero rightmost byte: copy in2 with the low 8 bits cleared. */
static DisasJumpType op_lzrb(DisasContext *s, DisasOps *o)
{
    tcg_gen_andi_i64(o->out, o->in2, -256);
    return DISAS_NEXT;
}
3287 | |
/*
 * Load count to block boundary: out = min(16, distance in bytes from
 * addr1 to the next (64 << m3)-byte boundary).  m3 > 6 is a
 * specification exception.  The ori/neg pair computes
 * block_size - (addr1 mod block_size).
 */
static DisasJumpType op_lcbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));

    if (get_field(s->fields, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_ori_i64(o->addr1, o->addr1, -block_size);
    tcg_gen_neg_i64(o->addr1, o->addr1);
    tcg_gen_movi_i64(o->out, 16);
    tcg_gen_umin_i64(o->out, o->out, o->addr1);
    gen_op_update1_cc_i64(s, CC_OP_LCBB, o->out);
    return DISAS_NEXT;
}
3304 | |
/*
 * Move: steal the in2 temporary as the output rather than copying it,
 * transferring ownership (and the "global" flag) to out.
 */
static DisasJumpType op_mov2(DisasContext *s, DisasOps *o)
{
    o->out = o->in2;
    o->g_out = o->g_in2;
    o->in2 = NULL;
    o->g_in2 = false;
    return DISAS_NEXT;
}
3313 | |
3314 | static DisasJumpType op_mov2e(DisasContext *s, DisasOps *o) |
3315 | { |
3316 | int b2 = get_field(s->fields, b2); |
3317 | TCGv ar1 = tcg_temp_new_i64(); |
3318 | |
3319 | o->out = o->in2; |
3320 | o->g_out = o->g_in2; |
3321 | o->in2 = NULL; |
3322 | o->g_in2 = false; |
3323 | |
3324 | switch (s->base.tb->flags & FLAG_MASK_ASC) { |
3325 | case PSW_ASC_PRIMARY >> FLAG_MASK_PSW_SHIFT: |
3326 | tcg_gen_movi_i64(ar1, 0); |
3327 | break; |
3328 | case PSW_ASC_ACCREG >> FLAG_MASK_PSW_SHIFT: |
3329 | tcg_gen_movi_i64(ar1, 1); |
3330 | break; |
3331 | case PSW_ASC_SECONDARY >> FLAG_MASK_PSW_SHIFT: |
3332 | if (b2) { |
3333 | tcg_gen_ld32u_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[b2])); |
3334 | } else { |
3335 | tcg_gen_movi_i64(ar1, 0); |
3336 | } |
3337 | break; |
3338 | case PSW_ASC_HOME >> FLAG_MASK_PSW_SHIFT: |
3339 | tcg_gen_movi_i64(ar1, 2); |
3340 | break; |
3341 | } |
3342 | |
3343 | tcg_gen_st32_i64(ar1, cpu_env, offsetof(CPUS390XState, aregs[1])); |
3344 | tcg_temp_free_i64(ar1); |
3345 | |
3346 | return DISAS_NEXT; |
3347 | } |
3348 | |
/*
 * Move a register pair: steal in1/in2 as out/out2 without copying,
 * transferring ownership and the "global" flags.
 */
static DisasJumpType op_movx(DisasContext *s, DisasOps *o)
{
    o->out = o->in1;
    o->out2 = o->in2;
    o->g_out = o->g_in1;
    o->g_out2 = o->g_in2;
    o->in1 = NULL;
    o->in2 = NULL;
    o->g_in1 = o->g_in2 = false;
    return DISAS_NEXT;
}
3360 | |
/* Move characters: helper copies l1+1 bytes from in2 to addr1. */
static DisasJumpType op_mvc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvc(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3368 | |
/* Move inverse: helper copies with the source traversed in reverse. */
static DisasJumpType op_mvcin(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvcin(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3376 | |
3377 | static DisasJumpType op_mvcl(DisasContext *s, DisasOps *o) |
3378 | { |
3379 | int r1 = get_field(s->fields, r1); |
3380 | int r2 = get_field(s->fields, r2); |
3381 | TCGv_i32 t1, t2; |
3382 | |
3383 | /* r1 and r2 must be even. */ |
3384 | if (r1 & 1 || r2 & 1) { |
3385 | gen_program_exception(s, PGM_SPECIFICATION); |
3386 | return DISAS_NORETURN; |
3387 | } |
3388 | |
3389 | t1 = tcg_const_i32(r1); |
3390 | t2 = tcg_const_i32(r2); |
3391 | gen_helper_mvcl(cc_op, cpu_env, t1, t2); |
3392 | tcg_temp_free_i32(t1); |
3393 | tcg_temp_free_i32(t2); |
3394 | set_cc_static(s); |
3395 | return DISAS_NEXT; |
3396 | } |
3397 | |
3398 | static DisasJumpType op_mvcle(DisasContext *s, DisasOps *o) |
3399 | { |
3400 | int r1 = get_field(s->fields, r1); |
3401 | int r3 = get_field(s->fields, r3); |
3402 | TCGv_i32 t1, t3; |
3403 | |
3404 | /* r1 and r3 must be even. */ |
3405 | if (r1 & 1 || r3 & 1) { |
3406 | gen_program_exception(s, PGM_SPECIFICATION); |
3407 | return DISAS_NORETURN; |
3408 | } |
3409 | |
3410 | t1 = tcg_const_i32(r1); |
3411 | t3 = tcg_const_i32(r3); |
3412 | gen_helper_mvcle(cc_op, cpu_env, t1, o->in2, t3); |
3413 | tcg_temp_free_i32(t1); |
3414 | tcg_temp_free_i32(t3); |
3415 | set_cc_static(s); |
3416 | return DISAS_NEXT; |
3417 | } |
3418 | |
3419 | static DisasJumpType op_mvclu(DisasContext *s, DisasOps *o) |
3420 | { |
3421 | int r1 = get_field(s->fields, r1); |
3422 | int r3 = get_field(s->fields, r3); |
3423 | TCGv_i32 t1, t3; |
3424 | |
3425 | /* r1 and r3 must be even. */ |
3426 | if (r1 & 1 || r3 & 1) { |
3427 | gen_program_exception(s, PGM_SPECIFICATION); |
3428 | return DISAS_NORETURN; |
3429 | } |
3430 | |
3431 | t1 = tcg_const_i32(r1); |
3432 | t3 = tcg_const_i32(r3); |
3433 | gen_helper_mvclu(cc_op, cpu_env, t1, o->in2, t3); |
3434 | tcg_temp_free_i32(t1); |
3435 | tcg_temp_free_i32(t3); |
3436 | set_cc_static(s); |
3437 | return DISAS_NEXT; |
3438 | } |
3439 | |
/*
 * Move with optional specifications: helper takes the two addresses and
 * the length/specification value from regs[r3]; sets the CC.
 */
static DisasJumpType op_mvcos(DisasContext *s, DisasOps *o)
{
    int r3 = get_field(s->fields, r3);
    gen_helper_mvcos(cc_op, cpu_env, o->addr1, o->in2, regs[r3]);
    set_cc_static(s);
    return DISAS_NEXT;
}
3447 | |
3448 | #ifndef CONFIG_USER_ONLY |
/*
 * Move to primary: the register number comes from the l1 field; its
 * contents carry the length/key.  The helper sets the CC.
 */
static DisasJumpType op_mvcp(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcp(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3456 | |
/*
 * Move to secondary: the register number comes from the l1 field; its
 * contents carry the length/key.  The helper sets the CC.
 */
static DisasJumpType op_mvcs(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, l1);
    gen_helper_mvcs(cc_op, cpu_env, regs[r1], o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3464 | #endif |
3465 | |
/* Move numerics: helper with length l1 over addr1/in2. */
static DisasJumpType op_mvn(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvn(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3473 | |
/* Move with offset: helper with length l1 over addr1/in2. */
static DisasJumpType op_mvo(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvo(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3481 | |
/* Move page: helper takes regs[0] plus both addresses; sets the CC. */
static DisasJumpType op_mvpg(DisasContext *s, DisasOps *o)
{
    gen_helper_mvpg(cc_op, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3488 | |
/*
 * Move string: the helper returns the updated first-operand address
 * into in1 and the updated second-operand address via return_low128;
 * the ending byte is taken from regs[0].  Sets the CC.
 */
static DisasJumpType op_mvst(DisasContext *s, DisasOps *o)
{
    gen_helper_mvst(o->in1, cpu_env, regs[0], o->in1, o->in2);
    set_cc_static(s);
    return_low128(o->in2);
    return DISAS_NEXT;
}
3496 | |
/* Move zones: helper with length l1 over addr1/in2. */
static DisasJumpType op_mvz(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_mvz(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3504 | |
/* 64-bit multiply: out = low 64 bits of in1 * in2. */
static DisasJumpType op_mul(DisasContext *s, DisasOps *o)
{
    tcg_gen_mul_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3510 | |
/* 64x64 -> 128 unsigned multiply: high half to out, low half to out2. */
static DisasJumpType op_mul128(DisasContext *s, DisasOps *o)
{
    tcg_gen_mulu2_i64(o->out2, o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3516 | |
/* Short-format FP multiply: out = in1 * in2 via the meeb helper. */
static DisasJumpType op_meeb(DisasContext *s, DisasOps *o)
{
    gen_helper_meeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3522 | |
/* FP multiply short->long: out = in1 * in2 via the mdeb helper. */
static DisasJumpType op_mdeb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdeb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3528 | |
/* Long-format FP multiply: out = in1 * in2 via the mdb helper. */
static DisasJumpType op_mdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3534 | |
/*
 * Extended-format FP multiply: 128-bit operands in out/out2 and
 * in1/in2; the low half of the result comes back via return_low128.
 */
static DisasJumpType op_mxb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3541 | |
/*
 * FP multiply long->extended: 128-bit first operand in out/out2,
 * 64-bit second operand in in2; low half returned via return_low128.
 */
static DisasJumpType op_mxdb(DisasContext *s, DisasOps *o)
{
    gen_helper_mxdb(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
3548 | |
/*
 * Short-format FP multiply-and-add: out = in1 * in2 + r3, where r3 is
 * the 32-bit FP view of register r3.
 */
static DisasJumpType op_maeb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_maeb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3556 | |
/* Long-format FP multiply-and-add: out = in1 * in2 + freg(r3). */
static DisasJumpType op_madb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
    gen_helper_madb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3564 | |
/*
 * Short-format FP multiply-and-subtract: out = in1 * in2 - r3, where r3
 * is the 32-bit FP view of register r3.
 */
static DisasJumpType op_mseb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg32_i64(get_field(s->fields, r3));
    gen_helper_mseb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3572 | |
/* Long-format FP multiply-and-subtract: out = in1 * in2 - freg(r3). */
static DisasJumpType op_msdb(DisasContext *s, DisasOps *o)
{
    TCGv_i64 r3 = load_freg(get_field(s->fields, r3));
    gen_helper_msdb(o->out, cpu_env, o->in1, o->in2, r3);
    tcg_temp_free_i64(r3);
    return DISAS_NEXT;
}
3580 | |
3581 | static DisasJumpType op_nabs(DisasContext *s, DisasOps *o) |
3582 | { |
3583 | TCGv_i64 z, n; |
3584 | z = tcg_const_i64(0); |
3585 | n = tcg_temp_new_i64(); |
3586 | tcg_gen_neg_i64(n, o->in2); |
3587 | tcg_gen_movcond_i64(TCG_COND_GE, o->out, o->in2, z, n, o->in2); |
3588 | tcg_temp_free_i64(n); |
3589 | tcg_temp_free_i64(z); |
3590 | return DISAS_NEXT; |
3591 | } |
3592 | |
/* Negative absolute for a 32-bit float image: force the sign bit (bit 31). */
static DisasJumpType op_nabsf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3598 | |
/* Negative absolute for a 64-bit float: force the sign bit (bit 63). */
static DisasJumpType op_nabsf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3604 | |
/*
 * Negative absolute for a 128-bit float: force the sign bit in the high
 * doubleword (in1), pass the low doubleword (in2) through unchanged.
 */
static DisasJumpType op_nabsf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_ori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3611 | |
/* AND characters: helper with length l1 over addr1/in2; sets the CC. */
static DisasJumpType op_nc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_nc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3620 | |
/* Two's-complement negation: out = -in2. */
static DisasJumpType op_neg(DisasContext *s, DisasOps *o)
{
    tcg_gen_neg_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3626 | |
/* Negate a 32-bit float image: flip the sign bit (bit 31). */
static DisasJumpType op_negf32(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x80000000ull);
    return DISAS_NEXT;
}
3632 | |
/* Negate a 64-bit float: flip the sign bit (bit 63). */
static DisasJumpType op_negf64(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in2, 0x8000000000000000ull);
    return DISAS_NEXT;
}
3638 | |
/*
 * Negate a 128-bit float: flip the sign bit in the high doubleword
 * (in1), pass the low doubleword (in2) through unchanged.
 */
static DisasJumpType op_negf128(DisasContext *s, DisasOps *o)
{
    tcg_gen_xori_i64(o->out, o->in1, 0x8000000000000000ull);
    tcg_gen_mov_i64(o->out2, o->in2);
    return DISAS_NEXT;
}
3645 | |
/* OR characters: helper with length l1 over addr1/in2; sets the CC. */
static DisasJumpType op_oc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_oc(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
3654 | |
/* Bitwise OR: out = in1 | in2. */
static DisasJumpType op_or(DisasContext *s, DisasOps *o)
{
    tcg_gen_or_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3660 | |
/*
 * OR immediate: shift the immediate (in2) into the field position
 * encoded in insn->data (low byte = shift, next byte = field size) and
 * OR it into in1.  The CC reflects only the bits of that field.
 */
static DisasJumpType op_ori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    /* in2 must be a local temp, since we modify it in place. */
    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated. */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
3676 | |
/*
 * OR to memory.  Without the interlocked-access facility the operand is
 * loaded, OR'd and stored back non-atomically; with it, the OR is done
 * atomically in memory.  In both cases the result value is recomputed
 * locally so the CC can be derived from it.
 */
static DisasJumpType op_oi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_or_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                    s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_or_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
3697 | |
/* Pack: helper with length l1 over addr1/in2. */
static DisasJumpType op_pack(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_pack(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}
3705 | |
3706 | static DisasJumpType op_pka(DisasContext *s, DisasOps *o) |
3707 | { |
3708 | int l2 = get_field(s->fields, l2) + 1; |
3709 | TCGv_i32 l; |
3710 | |
3711 | /* The length must not exceed 32 bytes. */ |
3712 | if (l2 > 32) { |
3713 | gen_program_exception(s, PGM_SPECIFICATION); |
3714 | return DISAS_NORETURN; |
3715 | } |
3716 | l = tcg_const_i32(l2); |
3717 | gen_helper_pka(cpu_env, o->addr1, o->in2, l); |
3718 | tcg_temp_free_i32(l); |
3719 | return DISAS_NEXT; |
3720 | } |
3721 | |
3722 | static DisasJumpType op_pku(DisasContext *s, DisasOps *o) |
3723 | { |
3724 | int l2 = get_field(s->fields, l2) + 1; |
3725 | TCGv_i32 l; |
3726 | |
3727 | /* The length must be even and should not exceed 64 bytes. */ |
3728 | if ((l2 & 1) || (l2 > 64)) { |
3729 | gen_program_exception(s, PGM_SPECIFICATION); |
3730 | return DISAS_NORETURN; |
3731 | } |
3732 | l = tcg_const_i32(l2); |
3733 | gen_helper_pku(cpu_env, o->addr1, o->in2, l); |
3734 | tcg_temp_free_i32(l); |
3735 | return DISAS_NEXT; |
3736 | } |
3737 | |
/* Population count via helper. */
static DisasJumpType op_popcnt(DisasContext *s, DisasOps *o)
{
    gen_helper_popcnt(o->out, o->in2);
    return DISAS_NEXT;
}
3743 | |
3744 | #ifndef CONFIG_USER_ONLY |
/* Purge TLB via helper. */
static DisasJumpType op_ptlb(DisasContext *s, DisasOps *o)
{
    gen_helper_ptlb(cpu_env);
    return DISAS_NEXT;
}
3750 | #endif |
3751 | |
/*
 * Rotate then insert selected bits (RISBG/RISBGN/RISBHG/RISBLG).
 * I3/I4 bound the destination bit field (with wraparound), I5 is the
 * rotate amount, and the 0x80 bit of I4 requests zeroing of the
 * remaining bits.  The generic form is lowered to extract, deposit or
 * an and/and/or sequence depending on the field shape.
 */
static DisasJumpType op_risbg(DisasContext *s, DisasOps *o)
{
    int i3 = get_field(s->fields, i3);
    int i4 = get_field(s->fields, i4);
    int i5 = get_field(s->fields, i5);
    int do_zero = i4 & 0x80;
    uint64_t mask, imask, pmask;
    int pos, len, rot;

    /* Adjust the arguments for the specific insn. */
    switch (s->fields->op2) {
    case 0x55: /* risbg */
    case 0x59: /* risbgn */
        i3 &= 63;
        i4 &= 63;
        pmask = ~0;
        break;
    case 0x5d: /* risbhg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0xffffffff00000000ull;
        break;
    case 0x51: /* risblg */
        i3 &= 31;
        i4 &= 31;
        pmask = 0x00000000ffffffffull;
        break;
    default:
        g_assert_not_reached();
    }

    /* MASK is the set of bits to be inserted from R2.
       Take care for I3/I4 wraparound. */
    mask = pmask >> i3;
    if (i3 <= i4) {
        mask ^= pmask >> i4 >> 1;
    } else {
        mask |= ~(pmask >> i4 >> 1);
    }
    mask &= pmask;

    /* IMASK is the set of bits to be kept from R1. In the case of the high/low
       insns, we need to keep the other half of the register. */
    imask = ~mask | ~pmask;
    if (do_zero) {
        imask = ~pmask;
    }

    len = i4 - i3 + 1;
    pos = 63 - i4;
    rot = i5 & 63;
    if (s->fields->op2 == 0x5d) {
        pos += 32;
    }

    /* In some cases we can implement this with extract. */
    if (imask == 0 && pos == 0 && len > 0 && len <= rot) {
        tcg_gen_extract_i64(o->out, o->in2, 64 - rot, len);
        return DISAS_NEXT;
    }

    /* In some cases we can implement this with deposit. */
    if (len > 0 && (imask == 0 || ~mask == imask)) {
        /* Note that we rotate the bits to be inserted to the lsb, not to
           the position as described in the PoO. */
        rot = (rot - pos) & 63;
    } else {
        pos = -1;
    }

    /* Rotate the input as necessary. */
    tcg_gen_rotli_i64(o->in2, o->in2, rot);

    /* Insert the selected bits into the output. */
    if (pos >= 0) {
        if (imask == 0) {
            tcg_gen_deposit_z_i64(o->out, o->in2, pos, len);
        } else {
            tcg_gen_deposit_i64(o->out, o->out, o->in2, pos, len);
        }
    } else if (imask == 0) {
        tcg_gen_andi_i64(o->out, o->in2, mask);
    } else {
        tcg_gen_andi_i64(o->in2, o->in2, mask);
        tcg_gen_andi_i64(o->out, o->out, imask);
        tcg_gen_or_i64(o->out, o->out, o->in2);
    }
    return DISAS_NEXT;
}
3841 | |
3842 | static DisasJumpType op_rosbg(DisasContext *s, DisasOps *o) |
3843 | { |
3844 | int i3 = get_field(s->fields, i3); |
3845 | int i4 = get_field(s->fields, i4); |
3846 | int i5 = get_field(s->fields, i5); |
3847 | uint64_t mask; |
3848 | |
3849 | /* If this is a test-only form, arrange to discard the result. */ |
3850 | if (i3 & 0x80) { |
3851 | o->out = tcg_temp_new_i64(); |
3852 | o->g_out = false; |
3853 | } |
3854 | |
3855 | i3 &= 63; |
3856 | i4 &= 63; |
3857 | i5 &= 63; |
3858 | |
3859 | /* MASK is the set of bits to be operated on from R2. |
3860 | Take care for I3/I4 wraparound. */ |
3861 | mask = ~0ull >> i3; |
3862 | if (i3 <= i4) { |
3863 | mask ^= ~0ull >> i4 >> 1; |
3864 | } else { |
3865 | mask |= ~(~0ull >> i4 >> 1); |
3866 | } |
3867 | |
3868 | /* Rotate the input as necessary. */ |
3869 | tcg_gen_rotli_i64(o->in2, o->in2, i5); |
3870 | |
3871 | /* Operate. */ |
3872 | switch (s->fields->op2) { |
3873 | case 0x55: /* AND */ |
3874 | tcg_gen_ori_i64(o->in2, o->in2, ~mask); |
3875 | tcg_gen_and_i64(o->out, o->out, o->in2); |
3876 | break; |
3877 | case 0x56: /* OR */ |
3878 | tcg_gen_andi_i64(o->in2, o->in2, mask); |
3879 | tcg_gen_or_i64(o->out, o->out, o->in2); |
3880 | break; |
3881 | case 0x57: /* XOR */ |
3882 | tcg_gen_andi_i64(o->in2, o->in2, mask); |
3883 | tcg_gen_xor_i64(o->out, o->out, o->in2); |
3884 | break; |
3885 | default: |
3886 | abort(); |
3887 | } |
3888 | |
3889 | /* Set the CC. */ |
3890 | tcg_gen_andi_i64(cc_dst, o->out, mask); |
3891 | set_cc_nz_u64(s, cc_dst); |
3892 | return DISAS_NEXT; |
3893 | } |
3894 | |
/* Byte-swap the low 16 bits of in2 into out. */
static DisasJumpType op_rev16(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap16_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3900 | |
/* Byte-swap the low 32 bits of in2 into out. */
static DisasJumpType op_rev32(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap32_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3906 | |
/* Byte-swap all 64 bits of in2 into out. */
static DisasJumpType op_rev64(DisasContext *s, DisasOps *o)
{
    tcg_gen_bswap64_i64(o->out, o->in2);
    return DISAS_NEXT;
}
3912 | |
3913 | static DisasJumpType op_rll32(DisasContext *s, DisasOps *o) |
3914 | { |
3915 | TCGv_i32 t1 = tcg_temp_new_i32(); |
3916 | TCGv_i32 t2 = tcg_temp_new_i32(); |
3917 | TCGv_i32 to = tcg_temp_new_i32(); |
3918 | tcg_gen_extrl_i64_i32(t1, o->in1); |
3919 | tcg_gen_extrl_i64_i32(t2, o->in2); |
3920 | tcg_gen_rotl_i32(to, t1, t2); |
3921 | tcg_gen_extu_i32_i64(o->out, to); |
3922 | tcg_temp_free_i32(t1); |
3923 | tcg_temp_free_i32(t2); |
3924 | tcg_temp_free_i32(to); |
3925 | return DISAS_NEXT; |
3926 | } |
3927 | |
/* 64-bit rotate left: out = rotl(in1, in2). */
static DisasJumpType op_rll64(DisasContext *s, DisasOps *o)
{
    tcg_gen_rotl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
3933 | |
3934 | #ifndef CONFIG_USER_ONLY |
/* Reset reference bit extended, via helper; the helper sets the CC. */
static DisasJumpType op_rrbe(DisasContext *s, DisasOps *o)
{
    gen_helper_rrbe(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
3941 | |
/* Set address-space control via helper. */
static DisasJumpType op_sacf(DisasContext *s, DisasOps *o)
{
    gen_helper_sacf(cpu_env, o->in2);
    /* Addressing mode has changed, so end the block. */
    return DISAS_PC_STALE;
}
3948 | #endif |
3949 | |
/*
 * Set addressing mode (insn->data selects 24/31/64-bit): raise a
 * specification exception if the current PC does not fit in the new
 * mode's address mask, then deposit the mode bits into the PSW mask.
 */
static DisasJumpType op_sam(DisasContext *s, DisasOps *o)
{
    int sam = s->insn->data;
    TCGv_i64 tsam;
    uint64_t mask;

    switch (sam) {
    case 0:
        mask = 0xffffff;
        break;
    case 1:
        mask = 0x7fffffff;
        break;
    default:
        mask = -1;
        break;
    }

    /* Bizarre but true, we check the address of the current insn for the
       specification exception, not the next to be executed. Thus the PoO
       documents that Bad Things Happen two bytes before the end. */
    if (s->base.pc_next & ~mask) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    s->pc_tmp &= mask;

    tsam = tcg_const_i64(sam);
    tcg_gen_deposit_i64(psw_mask, psw_mask, tsam, 31, 2);
    tcg_temp_free_i64(tsam);

    /* Always exit the TB, since we (may have) changed execution mode. */
    return DISAS_PC_STALE;
}
3984 | |
/* Set access register r1 from the low 32 bits of in2. */
static DisasJumpType op_sar(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    tcg_gen_st32_i64(o->in2, cpu_env, offsetof(CPUS390XState, aregs[r1]));
    return DISAS_NEXT;
}
3991 | |
/* Short-format FP subtract: out = in1 - in2 via the seb helper. */
static DisasJumpType op_seb(DisasContext *s, DisasOps *o)
{
    gen_helper_seb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
3997 | |
/* Long-format FP subtract: out = in1 - in2 via the sdb helper. */
static DisasJumpType op_sdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sdb(o->out, cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
4003 | |
/*
 * Extended-format FP subtract: 128-bit operands in out/out2 and
 * in1/in2; the low half of the result comes back via return_low128.
 */
static DisasJumpType op_sxb(DisasContext *s, DisasOps *o)
{
    gen_helper_sxb(o->out, cpu_env, o->out, o->out2, o->in1, o->in2);
    return_low128(o->out2);
    return DISAS_NEXT;
}
4010 | |
/* Short-format FP square root via helper. */
static DisasJumpType op_sqeb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqeb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4016 | |
/* Long-format FP square root via helper. */
static DisasJumpType op_sqdb(DisasContext *s, DisasOps *o)
{
    gen_helper_sqdb(o->out, cpu_env, o->in2);
    return DISAS_NEXT;
}
4022 | |
4023 | static DisasJumpType op_sqxb(DisasContext *s, DisasOps *o) |
4024 | { |
4025 | gen_helper_sqxb(o->out, cpu_env, o->in1, o->in2); |
4026 | return_low128(o->out2); |
4027 | return DISAS_NEXT; |
4028 | } |
4029 | |
4030 | #ifndef CONFIG_USER_ONLY |
/* SERVICE CALL (SCLP): the helper performs the call and sets the CC.  */
static DisasJumpType op_servc(DisasContext *s, DisasOps *o)
{
    gen_helper_servc(cc_op, cpu_env, o->in2, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SIGNAL PROCESSOR: helper dispatches the order and sets the CC.  */
static DisasJumpType op_sigp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_sigp(cc_op, cpu_env, o->in2, r1, r3);
    set_cc_static(s);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4048 | #endif |
4049 | |
/* STORE ON CONDITION (STOC / STOCG / STOCFH): store r1 only when the
   condition encoded in m3 is met.  insn->data selects the variant.  */
static DisasJumpType op_soc(DisasContext *s, DisasOps *o)
{
    DisasCompare c;
    TCGv_i64 a, h;
    TCGLabel *lab;
    int r1;

    disas_jcc(s, &c, get_field(s->fields, m3));

    /* We want to store when the condition is fulfilled, so branch
       out when it's not */
    c.cond = tcg_invert_cond(c.cond);

    lab = gen_new_label();
    if (c.is_64) {
        tcg_gen_brcond_i64(c.cond, c.u.s64.a, c.u.s64.b, lab);
    } else {
        tcg_gen_brcond_i32(c.cond, c.u.s32.a, c.u.s32.b, lab);
    }
    free_compare(&c);

    r1 = get_field(s->fields, r1);
    a = get_address(s, 0, get_field(s->fields, b2), get_field(s->fields, d2));
    switch (s->insn->data) {
    case 1: /* STOCG */
        tcg_gen_qemu_st64(regs[r1], a, get_mem_index(s));
        break;
    case 0: /* STOC */
        tcg_gen_qemu_st32(regs[r1], a, get_mem_index(s));
        break;
    case 2: /* STOCFH */
        /* Store the high-order word of the register.  */
        h = tcg_temp_new_i64();
        tcg_gen_shri_i64(h, regs[r1], 32);
        tcg_gen_qemu_st32(h, a, get_mem_index(s));
        tcg_temp_free_i64(h);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_i64(a);

    gen_set_label(lab);
    return DISAS_NEXT;
}
4094 | |
/* SHIFT LEFT SINGLE (arithmetic): insn->data is the index of the sign
   bit (31 for the 32-bit form, 63 for the 64-bit form).  */
static DisasJumpType op_sla(DisasContext *s, DisasOps *o)
{
    uint64_t sign = 1ull << s->insn->data;
    enum cc_op cco = s->insn->data == 31 ? CC_OP_SLA_32 : CC_OP_SLA_64;
    /* The CC is computed from the unshifted operands.  */
    gen_op_update2_cc_i64(s, cco, o->in1, o->in2);
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    /* The arithmetic left shift is curious in that it does not affect
       the sign bit.  Copy that over from the source unchanged. */
    tcg_gen_andi_i64(o->out, o->out, ~sign);
    tcg_gen_andi_i64(o->in1, o->in1, sign);
    tcg_gen_or_i64(o->out, o->out, o->in1);
    return DISAS_NEXT;
}
4108 | |
/* SHIFT LEFT SINGLE LOGICAL.  */
static DisasJumpType op_sll(DisasContext *s, DisasOps *o)
{
    tcg_gen_shl_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE (arithmetic).  */
static DisasJumpType op_sra(DisasContext *s, DisasOps *o)
{
    tcg_gen_sar_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SHIFT RIGHT SINGLE LOGICAL.  */
static DisasJumpType op_srl(DisasContext *s, DisasOps *o)
{
    tcg_gen_shr_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SET FPC: load the floating-point-control register from in2.  */
static DisasJumpType op_sfpc(DisasContext *s, DisasOps *o)
{
    gen_helper_sfpc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET FPC AND SIGNAL: like SFPC but may raise a simulated IEEE event.  */
static DisasJumpType op_sfas(DisasContext *s, DisasOps *o)
{
    gen_helper_sfas(cpu_env, o->in2);
    return DISAS_NEXT;
}
4138 | |
/* SET BFP ROUNDING MODE (2-bit form).  */
static DisasJumpType op_srnm(DisasContext *s, DisasOps *o)
{
    /* Bits other than 62 and 63 are ignored. Bit 29 is set to zero. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x3ull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET BFP ROUNDING MODE (3-bit form).  */
static DisasJumpType op_srnmb(DisasContext *s, DisasOps *o)
{
    /* Bits 0-55 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0xffull);
    gen_helper_srnm(cpu_env, o->addr1);
    return DISAS_NEXT;
}

/* SET DFP ROUNDING MODE: writes the 3-bit DFP rounding mode directly
   into FPC bits 4-6 (lsb-based) without going through a helper.  */
static DisasJumpType op_srnmt(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bits other than 61-63 are ignored. */
    tcg_gen_andi_i64(o->addr1, o->addr1, 0x7ull);

    /* No need to call a helper, we don't implement dfp */
    tcg_gen_ld32u_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));
    tcg_gen_deposit_i64(tmp, tmp, o->addr1, 4, 3);
    tcg_gen_st32_i64(tmp, cpu_env, offsetof(CPUS390XState, fpc));

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4170 | |
/* SET PROGRAM MASK: load the CC and program mask from register in1.  */
static DisasJumpType op_spm(DisasContext *s, DisasOps *o)
{
    /* The new CC sits in bits 28-29 (lsb-based) of the low word.  */
    tcg_gen_extrl_i64_i32(cc_op, o->in1);
    tcg_gen_extract_i32(cc_op, cc_op, 28, 2);
    set_cc_static(s);

    /* Insert the 4-bit program mask into the PSW mask.  */
    tcg_gen_shri_i64(o->in1, o->in1, 24);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in1, PSW_SHIFT_MASK_PM, 4);
    return DISAS_NEXT;
}
4181 | |
/* EXTRACT CPU TIME: GR0 = first operand - CPU timer, GR1 = second
   operand address, r3 = doubleword loaded from the r3 address.  */
static DisasJumpType op_ectg(DisasContext *s, DisasOps *o)
{
    int b1 = get_field(s->fields, b1);
    int d1 = get_field(s->fields, d1);
    int b2 = get_field(s->fields, b2);
    int d2 = get_field(s->fields, d2);
    int r3 = get_field(s->fields, r3);
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* fetch all operands first */
    o->in1 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in1, regs[b1], d1);
    o->in2 = tcg_temp_new_i64();
    tcg_gen_addi_i64(o->in2, regs[b2], d2);
    o->addr1 = get_address(s, 0, r3, 0);

    /* load the third operand into r3 before modifying anything */
    tcg_gen_qemu_ld64(regs[r3], o->addr1, get_mem_index(s));

    /* subtract CPU timer from first operand and store in GR0 */
    gen_helper_stpt(tmp, cpu_env);
    tcg_gen_sub_i64(regs[0], o->in1, tmp);

    /* store second operand in GR1 */
    tcg_gen_mov_i64(regs[1], o->in2);

    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4211 | |
4212 | #ifndef CONFIG_USER_ONLY |
/* SET PSW KEY FROM ADDRESS: bits 4-7 (lsb-based) of the second-operand
   address become the 4-bit PSW access key.  */
static DisasJumpType op_spka(DisasContext *s, DisasOps *o)
{
    tcg_gen_shri_i64(o->in2, o->in2, 4);
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, PSW_SHIFT_KEY, 4);
    return DISAS_NEXT;
}

/* SET STORAGE KEY EXTENDED.  */
static DisasJumpType op_sske(DisasContext *s, DisasOps *o)
{
    gen_helper_sske(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}

/* SET SYSTEM MASK: replace the high 8 bits of the PSW mask with in2.  */
static DisasJumpType op_ssm(DisasContext *s, DisasOps *o)
{
    tcg_gen_deposit_i64(psw_mask, psw_mask, o->in2, 56, 8);
    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}

/* STORE CPU ADDRESS: store the core id of this cpu.  */
static DisasJumpType op_stap(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld32u_i64(o->out, cpu_env, offsetof(CPUS390XState, core_id));
    return DISAS_NEXT;
}
4238 | #endif |
4239 | |
/* STORE CLOCK: 64-bit TOD clock value into out.  */
static DisasJumpType op_stck(DisasContext *s, DisasOps *o)
{
    gen_helper_stck(o->out, cpu_env);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}

/* STORE CLOCK EXTENDED: 16-byte value = epoch byte, 64-bit TOD clock,
   programmable field; stored as two doublewords at in2.  */
static DisasJumpType op_stcke(DisasContext *s, DisasOps *o)
{
    TCGv_i64 c1 = tcg_temp_new_i64();
    TCGv_i64 c2 = tcg_temp_new_i64();
    TCGv_i64 todpr = tcg_temp_new_i64();
    gen_helper_stck(c1, cpu_env);
    /* 16-bit value stored in a uint32_t (only valid bits set) */
    tcg_gen_ld32u_i64(todpr, cpu_env, offsetof(CPUS390XState, todpr));
    /* Shift the 64-bit value into its place as a zero-extended
       104-bit value.  Note that "bit positions 64-103 are always
       non-zero so that they compare differently to STCK"; we set
       the least significant bit to 1. */
    tcg_gen_shli_i64(c2, c1, 56);
    tcg_gen_shri_i64(c1, c1, 8);
    tcg_gen_ori_i64(c2, c2, 0x10000);
    tcg_gen_or_i64(c2, c2, todpr);
    tcg_gen_qemu_st64(c1, o->in2, get_mem_index(s));
    tcg_gen_addi_i64(o->in2, o->in2, 8);
    tcg_gen_qemu_st64(c2, o->in2, get_mem_index(s));
    tcg_temp_free_i64(c1);
    tcg_temp_free_i64(c2);
    tcg_temp_free_i64(todpr);
    /* ??? We don't implement clock states.  */
    gen_op_movi_cc(s, 0);
    return DISAS_NEXT;
}
4274 | |
4275 | #ifndef CONFIG_USER_ONLY |
/* SET CLOCK: load the aligned doubleword operand; the helper sets CC.  */
static DisasJumpType op_sck(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_ld_i64(o->in1, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
    gen_helper_sck(cc_op, cpu_env, o->in1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET CLOCK COMPARATOR.  */
static DisasJumpType op_sckc(DisasContext *s, DisasOps *o)
{
    gen_helper_sckc(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* SET CLOCK PROGRAMMABLE FIELD: implicit operand in GR0.  */
static DisasJumpType op_sckpf(DisasContext *s, DisasOps *o)
{
    gen_helper_sckpf(cpu_env, regs[0]);
    return DISAS_NEXT;
}

/* STORE CLOCK COMPARATOR.  */
static DisasJumpType op_stckc(DisasContext *s, DisasOps *o)
{
    gen_helper_stckc(o->out, cpu_env);
    return DISAS_NEXT;
}
4301 | |
/* STORE CONTROL (64-bit): store control registers r1..r3 at in2.  */
static DisasJumpType op_stctg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctg(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}

/* STORE CONTROL (32-bit): store control registers r1..r3 at in2.  */
static DisasJumpType op_stctl(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stctl(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4321 | |
/* STORE CPU ID.  */
static DisasJumpType op_stidp(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, cpuid));
    return DISAS_NEXT;
}

/* SET CPU TIMER.  */
static DisasJumpType op_spt(DisasContext *s, DisasOps *o)
{
    gen_helper_spt(cpu_env, o->in2);
    return DISAS_NEXT;
}

/* STORE FACILITY LIST: writes into low-core memory via the helper.  */
static DisasJumpType op_stfl(DisasContext *s, DisasOps *o)
{
    gen_helper_stfl(cpu_env);
    return DISAS_NEXT;
}

/* STORE CPU TIMER.  */
static DisasJumpType op_stpt(DisasContext *s, DisasOps *o)
{
    gen_helper_stpt(o->out, cpu_env);
    return DISAS_NEXT;
}

/* STORE SYSTEM INFORMATION: function code/selectors in GR0/GR1.  */
static DisasJumpType op_stsi(DisasContext *s, DisasOps *o)
{
    gen_helper_stsi(cc_op, cpu_env, o->in2, regs[0], regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SET PREFIX.  */
static DisasJumpType op_spx(DisasContext *s, DisasOps *o)
{
    gen_helper_spx(cpu_env, o->in2);
    return DISAS_NEXT;
}
4358 | |
/* CANCEL SUBCHANNEL: subchannel id is the implicit operand in GR1.  */
static DisasJumpType op_xsch(DisasContext *s, DisasOps *o)
{
    gen_helper_xsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CLEAR SUBCHANNEL.  */
static DisasJumpType op_csch(DisasContext *s, DisasOps *o)
{
    gen_helper_csch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* HALT SUBCHANNEL.  */
static DisasJumpType op_hsch(DisasContext *s, DisasOps *o)
{
    gen_helper_hsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* MODIFY SUBCHANNEL: SCHIB address in in2.  */
static DisasJumpType op_msch(DisasContext *s, DisasOps *o)
{
    gen_helper_msch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESET CHANNEL PATH.  */
static DisasJumpType op_rchp(DisasContext *s, DisasOps *o)
{
    gen_helper_rchp(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* RESUME SUBCHANNEL.  */
static DisasJumpType op_rsch(DisasContext *s, DisasOps *o)
{
    gen_helper_rsch(cpu_env, regs[1]);
    set_cc_static(s);
    return DISAS_NEXT;
}
4400 | |
/* SET ADDRESS LIMIT: implicit operand in GR1.  */
static DisasJumpType op_sal(DisasContext *s, DisasOps *o)
{
    gen_helper_sal(cpu_env, regs[1]);
    return DISAS_NEXT;
}

/* SET CHANNEL MONITOR: implicit operands in GR1 and GR2.  */
static DisasJumpType op_schm(DisasContext *s, DisasOps *o)
{
    gen_helper_schm(cpu_env, regs[1], regs[2], o->in2);
    return DISAS_NEXT;
}

/* SIGNAL ADAPTER.  */
static DisasJumpType op_siga(DisasContext *s, DisasOps *o)
{
    /* From KVM code: Not provided, set CC = 3 for subchannel not operational */
    gen_op_movi_cc(s, 3);
    return DISAS_NEXT;
}

/* STORE CHANNEL PATH STATUS.  */
static DisasJumpType op_stcps(DisasContext *s, DisasOps *o)
{
    /* The instruction is suppressed if not provided.  */
    return DISAS_NEXT;
}
4425 | |
/* START SUBCHANNEL: subchannel id in GR1, ORB address in in2.  */
static DisasJumpType op_ssch(DisasContext *s, DisasOps *o)
{
    gen_helper_ssch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE SUBCHANNEL: subchannel id in GR1, SCHIB address in in2.  */
static DisasJumpType op_stsch(DisasContext *s, DisasOps *o)
{
    gen_helper_stsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* STORE CHANNEL REPORT WORD.  */
static DisasJumpType op_stcrw(DisasContext *s, DisasOps *o)
{
    gen_helper_stcrw(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PENDING INTERRUPTION.  */
static DisasJumpType op_tpi(DisasContext *s, DisasOps *o)
{
    gen_helper_tpi(cc_op, cpu_env, o->addr1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST SUBCHANNEL: subchannel id in GR1, IRB address in in2.  */
static DisasJumpType op_tsch(DisasContext *s, DisasOps *o)
{
    gen_helper_tsch(cpu_env, regs[1], o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* CHANNEL SUBSYSTEM CALL: command block address in in2.  */
static DisasJumpType op_chsc(DisasContext *s, DisasOps *o)
{
    gen_helper_chsc(cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4467 | |
/* STORE PREFIX: the prefix register, masked to its architected bits.  */
static DisasJumpType op_stpx(DisasContext *s, DisasOps *o)
{
    tcg_gen_ld_i64(o->out, cpu_env, offsetof(CPUS390XState, psa));
    tcg_gen_andi_i64(o->out, o->out, 0x7fffe000);
    return DISAS_NEXT;
}
4474 | |
/* STORE THEN AND/OR SYSTEM MASK: opcode 0xac is STNSM (AND the mask
   with i2); otherwise STOSM (OR the mask with i2).  */
static DisasJumpType op_stnosm(DisasContext *s, DisasOps *o)
{
    uint64_t i2 = get_field(s->fields, i2);
    TCGv_i64 t;

    /* It is important to do what the instruction name says: STORE THEN.
       If we let the output hook perform the store then if we fault and
       restart, we'll have the wrong SYSTEM MASK in place.  */
    t = tcg_temp_new_i64();
    tcg_gen_shri_i64(t, psw_mask, 56);
    tcg_gen_qemu_st8(t, o->addr1, get_mem_index(s));
    tcg_temp_free_i64(t);

    if (s->fields->op == 0xac) {
        tcg_gen_andi_i64(psw_mask, psw_mask,
                         (i2 << 56) | 0x00ffffffffffffffull);
    } else {
        tcg_gen_ori_i64(psw_mask, psw_mask, i2 << 56);
    }

    /* Exit to main loop to reevaluate s390_cpu_exec_interrupt.  */
    return DISAS_PC_STALE_NOCHAIN;
}
4498 | |
/* STORE USING REAL ADDRESS (32-bit).  */
static DisasJumpType op_stura(DisasContext *s, DisasOps *o)
{
    gen_helper_stura(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}

/* STORE USING REAL ADDRESS (64-bit).  */
static DisasJumpType op_sturg(DisasContext *s, DisasOps *o)
{
    gen_helper_sturg(cpu_env, o->in2, o->in1);
    return DISAS_NEXT;
}
4510 | #endif |
4511 | |
/* STORE FACILITY LIST EXTENDED: length in GR0, helper sets the CC.  */
static DisasJumpType op_stfle(DisasContext *s, DisasOps *o)
{
    gen_helper_stfle(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* Generic 8-bit store of in1 at address in2.  */
static DisasJumpType op_st8(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st8(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 16-bit store of in1 at address in2.  */
static DisasJumpType op_st16(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st16(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 32-bit store of in1 at address in2.  */
static DisasJumpType op_st32(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st32(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}

/* Generic 64-bit store of in1 at address in2.  */
static DisasJumpType op_st64(DisasContext *s, DisasOps *o)
{
    tcg_gen_qemu_st64(o->in1, o->in2, get_mem_index(s));
    return DISAS_NEXT;
}
4542 | |
/* STORE ACCESS MULTIPLE: store access registers r1..r3 at in2.  */
static DisasJumpType op_stam(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    gen_helper_stam(cpu_env, r1, o->in2, r3);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    return DISAS_NEXT;
}
4552 | |
/* STORE CHARACTERS UNDER MASK: store the bytes of a register word
   selected by m3.  insn->data is the bit offset of that word within
   the 64-bit register (presumably 0 for the low-word form and 32 for
   the high-word form -- confirm against the insn table).  */
static DisasJumpType op_stcm(DisasContext *s, DisasOps *o)
{
    int m3 = get_field(s->fields, m3);
    int pos, base = s->insn->data;
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* Bit position of the lowest-numbered selected byte.  */
    pos = base + ctz32(m3) * 8;
    switch (m3) {
    case 0xf:
        /* Effectively a 32-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st32(tmp, o->in2, get_mem_index(s));
        break;

    case 0xc:
    case 0x6:
    case 0x3:
        /* Effectively a 16-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st16(tmp, o->in2, get_mem_index(s));
        break;

    case 0x8:
    case 0x4:
    case 0x2:
    case 0x1:
        /* Effectively an 8-bit store.  */
        tcg_gen_shri_i64(tmp, o->in1, pos);
        tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
        break;

    default:
        /* This is going to be a sequence of shifts and stores.  */
        pos = base + 32 - 8;
        while (m3) {
            if (m3 & 0x8) {
                tcg_gen_shri_i64(tmp, o->in1, pos);
                tcg_gen_qemu_st8(tmp, o->in2, get_mem_index(s));
                tcg_gen_addi_i64(o->in2, o->in2, 1);
            }
            m3 = (m3 << 1) & 0xf;
            pos -= 8;
        }
        break;
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
4601 | |
/* STORE MULTIPLE: store registers r1..r3 (wrapping mod 16) at in2.
   insn->data is the element size in bytes: 4 or 8.  */
static DisasJumpType op_stm(DisasContext *s, DisasOps *o)
{
    int r1 = get_field(s->fields, r1);
    int r3 = get_field(s->fields, r3);
    int size = s->insn->data;
    TCGv_i64 tsize = tcg_const_i64(size);

    while (1) {
        if (size == 8) {
            tcg_gen_qemu_st64(regs[r1], o->in2, get_mem_index(s));
        } else {
            tcg_gen_qemu_st32(regs[r1], o->in2, get_mem_index(s));
        }
        if (r1 == r3) {
            break;
        }
        tcg_gen_add_i64(o->in2, o->in2, tsize);
        r1 = (r1 + 1) & 15;
    }

    tcg_temp_free_i64(tsize);
    return DISAS_NEXT;
}
4625 | |
4626 | static DisasJumpType op_stmh(DisasContext *s, DisasOps *o) |
4627 | { |
4628 | int r1 = get_field(s->fields, r1); |
4629 | int r3 = get_field(s->fields, r3); |
4630 | TCGv_i64 t = tcg_temp_new_i64(); |
4631 | TCGv_i64 t4 = tcg_const_i64(4); |
4632 | TCGv_i64 t32 = tcg_const_i64(32); |
4633 | |
4634 | while (1) { |
4635 | tcg_gen_shl_i64(t, regs[r1], t32); |
4636 | tcg_gen_qemu_st32(t, o->in2, get_mem_index(s)); |
4637 | if (r1 == r3) { |
4638 | break; |
4639 | } |
4640 | tcg_gen_add_i64(o->in2, o->in2, t4); |
4641 | r1 = (r1 + 1) & 15; |
4642 | } |
4643 | |
4644 | tcg_temp_free_i64(t); |
4645 | tcg_temp_free_i64(t4); |
4646 | tcg_temp_free_i64(t32); |
4647 | return DISAS_NEXT; |
4648 | } |
4649 | |
/* STORE PAIR TO QUADWORD: 16-byte store of out:out2 at address in2;
   in a parallel context the store must be a single atomic 128-bit op.  */
static DisasJumpType op_stpq(DisasContext *s, DisasOps *o)
{
    if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
        gen_helper_stpq(cpu_env, o->in2, o->out2, o->out);
    } else if (HAVE_ATOMIC128) {
        gen_helper_stpq_parallel(cpu_env, o->in2, o->out2, o->out);
    } else {
        /* No 128-bit atomics on this host: retry via the exclusive
           single-threaded path.  */
        gen_helper_exit_atomic(cpu_env);
        return DISAS_NORETURN;
    }
    return DISAS_NEXT;
}
4662 | |
/* SEARCH STRING: helper updates r1/r2 in env and sets the CC.  */
static DisasJumpType op_srst(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srst(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* SEARCH STRING UNICODE.  */
static DisasJumpType op_srstu(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_srstu(cpu_env, r1, r2);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4688 | |
/* SUBTRACT: out = in1 - in2 (CC handled by the insn table).  */
static DisasJumpType op_sub(DisasContext *s, DisasOps *o)
{
    tcg_gen_sub_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}
4694 | |
/* SUBTRACT WITH BORROW: out = in1 - in2 - borrow-in.  */
static DisasJumpType op_subb(DisasContext *s, DisasOps *o)
{
    DisasCompare cmp;
    TCGv_i64 borrow;

    tcg_gen_sub_i64(o->out, o->in1, o->in2);

    /* The !borrow flag is the msb of CC.  Since we want the inverse of
       that, we ask for a comparison of CC=0 | CC=1 -> mask of 8 | 4.  */
    disas_jcc(s, &cmp, 8 | 4);
    borrow = tcg_temp_new_i64();
    if (cmp.is_64) {
        tcg_gen_setcond_i64(cmp.cond, borrow, cmp.u.s64.a, cmp.u.s64.b);
    } else {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_setcond_i32(cmp.cond, t, cmp.u.s32.a, cmp.u.s32.b);
        tcg_gen_extu_i32_i64(borrow, t);
        tcg_temp_free_i32(t);
    }
    free_compare(&cmp);

    tcg_gen_sub_i64(o->out, o->out, borrow);
    tcg_temp_free_i64(borrow);
    return DISAS_NEXT;
}
4720 | |
/* SUPERVISOR CALL: record the call number and insn length in env,
   then raise the SVC exception.  */
static DisasJumpType op_svc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t;

    /* Make the PSW address and cc visible to the exception handler.  */
    update_psw_addr(s);
    update_cc_op(s);

    t = tcg_const_i32(get_field(s->fields, i1) & 0xff);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_code));
    tcg_temp_free_i32(t);

    t = tcg_const_i32(s->ilen);
    tcg_gen_st_i32(t, cpu_env, offsetof(CPUS390XState, int_svc_ilen));
    tcg_temp_free_i32(t);

    gen_exception(EXCP_SVC);
    return DISAS_NORETURN;
}
4739 | |
4740 | static DisasJumpType op_tam(DisasContext *s, DisasOps *o) |
4741 | { |
4742 | int cc = 0; |
4743 | |
4744 | cc |= (s->base.tb->flags & FLAG_MASK_64) ? 2 : 0; |
4745 | cc |= (s->base.tb->flags & FLAG_MASK_32) ? 1 : 0; |
4746 | gen_op_movi_cc(s, cc); |
4747 | return DISAS_NEXT; |
4748 | } |
4749 | |
/* TEST DATA CLASS (short BFP).  */
static DisasJumpType op_tceb(DisasContext *s, DisasOps *o)
{
    gen_helper_tceb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (long BFP).  */
static DisasJumpType op_tcdb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcdb(cc_op, cpu_env, o->in1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST DATA CLASS (extended BFP): 128-bit operand in out/out2.  */
static DisasJumpType op_tcxb(DisasContext *s, DisasOps *o)
{
    gen_helper_tcxb(cc_op, cpu_env, o->out, o->out2, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4770 | |
4771 | #ifndef CONFIG_USER_ONLY |
4772 | |
/* TEST BLOCK.  */
static DisasJumpType op_testblock(DisasContext *s, DisasOps *o)
{
    gen_helper_testblock(cc_op, cpu_env, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TEST PROTECTION.  */
static DisasJumpType op_tprot(DisasContext *s, DisasOps *o)
{
    gen_helper_tprot(cc_op, cpu_env, o->addr1, o->in2);
    set_cc_static(s);
    return DISAS_NEXT;
}
4786 | |
4787 | #endif |
4788 | |
/* TEST DECIMAL: l1 field encodes length - 1, hence the + 1.  */
static DisasJumpType op_tp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l1 = tcg_const_i32(get_field(s->fields, l1) + 1);
    gen_helper_tp(cc_op, cpu_env, o->addr1, l1);
    tcg_temp_free_i32(l1);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE: translate l1+1 bytes at addr1 via the table at in2.  */
static DisasJumpType op_tr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_tr(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE EXTENDED: 128-bit result returned in out/out2.  */
static DisasJumpType op_tre(DisasContext *s, DisasOps *o)
{
    gen_helper_tre(o->out, cpu_env, o->out, o->out2, o->in2);
    return_low128(o->out2);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST.  */
static DisasJumpType op_trt(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trt(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* TRANSLATE AND TEST REVERSE.  */
static DisasJumpType op_trtr(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_trtr(cc_op, cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4832 | |
/* TRANSLATE ONE/TWO TO ONE/TWO (TROO/TROT/TRTO/TRTT): the low two
   bits of the opcode select the source/destination character sizes.  */
static DisasJumpType op_trXX(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));
    TCGv_i32 sizes = tcg_const_i32(s->insn->opc & 3);
    TCGv_i32 tst = tcg_temp_new_i32();
    int m3 = get_field(s->fields, m3);

    /* The m3 test-character control requires the ETF2-enhancement
       facility; without it the bit is treated as zero.  */
    if (!s390_has_feat(S390_FEAT_ETF2_ENH)) {
        m3 = 0;
    }
    if (m3 & 1) {
        /* -1 is passed to the helper in place of a test character;
           presumably it acts as a "no test" sentinel there.  */
        tcg_gen_movi_i32(tst, -1);
    } else {
        /* The test character comes from GR0, sized per the opcode.  */
        tcg_gen_extrl_i64_i32(tst, regs[0]);
        if (s->insn->opc & 3) {
            tcg_gen_ext8u_i32(tst, tst);
        } else {
            tcg_gen_ext16u_i32(tst, tst);
        }
    }
    gen_helper_trXX(cc_op, cpu_env, r1, r2, tst, sizes);

    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    tcg_temp_free_i32(sizes);
    tcg_temp_free_i32(tst);
    set_cc_static(s);
    return DISAS_NEXT;
}
4863 | |
/* TEST AND SET: atomically exchange the byte at in2 with 0xff; the CC
   is the old leftmost (sign) bit of that byte.  */
static DisasJumpType op_ts(DisasContext *s, DisasOps *o)
{
    TCGv_i32 t1 = tcg_const_i32(0xff);
    tcg_gen_atomic_xchg_i32(t1, o->in2, t1, get_mem_index(s), MO_UB);
    tcg_gen_extract_i32(cc_op, t1, 7, 1);
    tcg_temp_free_i32(t1);
    set_cc_static(s);
    return DISAS_NEXT;
}
4873 | |
/* UNPACK.  */
static DisasJumpType op_unpk(DisasContext *s, DisasOps *o)
{
    TCGv_i32 l = tcg_const_i32(get_field(s->fields, l1));
    gen_helper_unpk(cpu_env, l, o->addr1, o->in2);
    tcg_temp_free_i32(l);
    return DISAS_NEXT;
}

/* UNPACK ASCII.  */
static DisasJumpType op_unpka(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must not exceed 32 bytes.  */
    if (l1 > 32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpka(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}

/* UNPACK UNICODE.  */
static DisasJumpType op_unpku(DisasContext *s, DisasOps *o)
{
    int l1 = get_field(s->fields, l1) + 1;
    TCGv_i32 l;

    /* The length must be even and should not exceed 64 bytes.  */
    if ((l1 & 1) || (l1 > 64)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    l = tcg_const_i32(l1);
    gen_helper_unpku(cc_op, cpu_env, o->addr1, l, o->in2);
    tcg_temp_free_i32(l);
    set_cc_static(s);
    return DISAS_NEXT;
}
4915 | |
4916 | |
/* EXCLUSIVE OR (character): dst ^= src for l1+1 bytes.  The common
   "XC x,x" idiom (operands identical) is open-coded as a zeroing
   store; everything else goes through the helper.  */
static DisasJumpType op_xc(DisasContext *s, DisasOps *o)
{
    int d1 = get_field(s->fields, d1);
    int d2 = get_field(s->fields, d2);
    int b1 = get_field(s->fields, b1);
    int b2 = get_field(s->fields, b2);
    int l = get_field(s->fields, l1);
    TCGv_i32 t32;

    o->addr1 = get_address(s, 0, b1, d1);

    /* If the addresses are identical, this is a store/memset of zero.  */
    if (b1 == b2 && d1 == d2 && (l + 1) <= 32) {
        o->in2 = tcg_const_i64(0);

        /* Emit stores of decreasing width to cover l bytes total.  */
        l++;
        while (l >= 8) {
            tcg_gen_qemu_st64(o->in2, o->addr1, get_mem_index(s));
            l -= 8;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 8);
            }
        }
        if (l >= 4) {
            tcg_gen_qemu_st32(o->in2, o->addr1, get_mem_index(s));
            l -= 4;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 4);
            }
        }
        if (l >= 2) {
            tcg_gen_qemu_st16(o->in2, o->addr1, get_mem_index(s));
            l -= 2;
            if (l > 0) {
                tcg_gen_addi_i64(o->addr1, o->addr1, 2);
            }
        }
        if (l) {
            tcg_gen_qemu_st8(o->in2, o->addr1, get_mem_index(s));
        }
        /* XOR with itself always yields zero, hence CC 0.  */
        gen_op_movi_cc(s, 0);
        return DISAS_NEXT;
    }

    /* But in general we'll defer to a helper.  */
    o->in2 = get_address(s, 0, b2, d2);
    t32 = tcg_const_i32(l);
    gen_helper_xc(cc_op, cpu_env, t32, o->addr1, o->in2);
    tcg_temp_free_i32(t32);
    set_cc_static(s);
    return DISAS_NEXT;
}
4969 | |
/* EXCLUSIVE OR: out = in1 ^ in2.  */
static DisasJumpType op_xor(DisasContext *s, DisasOps *o)
{
    tcg_gen_xor_i64(o->out, o->in1, o->in2);
    return DISAS_NEXT;
}

/* EXCLUSIVE OR IMMEDIATE: insn->data packs the field size (high byte)
   and the bit offset (low byte) of the immediate within the register.  */
static DisasJumpType op_xori(DisasContext *s, DisasOps *o)
{
    int shift = s->insn->data & 0xff;
    int size = s->insn->data >> 8;
    uint64_t mask = ((1ull << size) - 1) << shift;

    assert(!o->g_in2);
    tcg_gen_shli_i64(o->in2, o->in2, shift);
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    /* Produce the CC from only the bits manipulated.  */
    tcg_gen_andi_i64(cc_dst, o->out, mask);
    set_cc_nz_u64(s, cc_dst);
    return DISAS_NEXT;
}
4991 | |
/* EXCLUSIVE OR in storage (XI): with the interlocked-access facility
   the read-modify-write must be a single atomic operation.  insn->data
   carries the MemOp for the access size.  */
static DisasJumpType op_xi(DisasContext *s, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_ld_tl(o->in1, o->addr1, get_mem_index(s), s->insn->data);
    } else {
        /* Perform the atomic operation in memory. */
        tcg_gen_atomic_fetch_xor_i64(o->in1, o->addr1, o->in2, get_mem_index(s),
                                     s->insn->data);
    }

    /* Recompute also for atomic case: needed for setting CC. */
    tcg_gen_xor_i64(o->out, o->in1, o->in2);

    if (!s390_has_feat(S390_FEAT_INTERLOCKED_ACCESS_2)) {
        tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), s->insn->data);
    }
    return DISAS_NEXT;
}
5012 | |
/* Produce a constant zero result. */
static DisasJumpType op_zero(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    return DISAS_NEXT;
}
5018 | |
/* Produce a zero result pair.  out2 aliases out; g_out2 flags the alias
   so the generic temp handling does not treat out2 as separately owned
   (matches the g_* flag usage elsewhere in this file). */
static DisasJumpType op_zero2(DisasContext *s, DisasOps *o)
{
    o->out = tcg_const_i64(0);
    o->out2 = o->out;
    o->g_out2 = true;
    return DISAS_NEXT;
}
5026 | |
5027 | #ifndef CONFIG_USER_ONLY |
/* PCI CLP instruction: deferred entirely to a helper; the helper leaves
   the resulting CC in cc_op. */
static DisasJumpType op_clp(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_clp(cpu_env, r2);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5037 | |
/* PCI load (PCILG): helper does the work using the r1/r2 field numbers;
   CC comes back via cc_op. */
static DisasJumpType op_pcilg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcilg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5049 | |
/* PCI store (PCISTG): helper does the work using the r1/r2 field
   numbers; CC comes back via cc_op. */
static DisasJumpType op_pcistg(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_pcistg(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5061 | |
/* Store PCI function controls (STPCIFC): addr1 is the storage operand,
   b2 is passed as the access register number for the helper. */
static DisasJumpType op_stpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_stpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5073 | |
/* Set interruption controls (SIC): helper only; note no CC update,
   unlike the other PCI ops in this section. */
static DisasJumpType op_sic(DisasContext *s, DisasOps *o)
{
    gen_helper_sic(cpu_env, o->in1, o->in2);
    return DISAS_NEXT;
}
5079 | |
/* Refresh PCI translations (RPCIT): helper does the work; CC via cc_op. */
static DisasJumpType op_rpcit(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r2 = tcg_const_i32(get_field(s->fields, r2));

    gen_helper_rpcit(cpu_env, r1, r2);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r2);
    set_cc_static(s);
    return DISAS_NEXT;
}
5091 | |
/* PCI store block (PCISTB): r1/r3 register numbers plus the storage
   operand address and its base (b2) as access register. */
static DisasJumpType op_pcistb(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 r3 = tcg_const_i32(get_field(s->fields, r3));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_pcistb(cpu_env, r1, r3, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    tcg_temp_free_i32(r3);
    set_cc_static(s);
    return DISAS_NEXT;
}
5105 | |
/* Modify PCI function controls (MPCIFC): same calling pattern as
   STPCIFC above. */
static DisasJumpType op_mpcifc(DisasContext *s, DisasOps *o)
{
    TCGv_i32 r1 = tcg_const_i32(get_field(s->fields, r1));
    TCGv_i32 ar = tcg_const_i32(get_field(s->fields, b2));

    gen_helper_mpcifc(cpu_env, r1, o->addr1, ar);
    tcg_temp_free_i32(ar);
    tcg_temp_free_i32(r1);
    set_cc_static(s);
    return DISAS_NEXT;
}
5117 | #endif |
5118 | |
5119 | #include "translate_vx.inc.c" |
5120 | |
5121 | /* ====================================================================== */ |
5122 | /* The "Cc OUTput" generators. Given the generated output (and in some cases |
5123 | the original inputs), update the various cc data structures in order to |
5124 | be able to compute the new condition code. */ |
5125 | |
/* Absolute-value results: CC needs only the output. */
static void cout_abs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_32, o->out);
}

static void cout_abs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_ABS_64, o->out);
}

/* Signed/unsigned add and add-with-carry: the CC computation needs
   both inputs and the result (to recover overflow resp. carry). */
static void cout_adds32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_32, o->in1, o->in2, o->out);
}

static void cout_adds64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADD_64, o->in1, o->in2, o->out);
}

static void cout_addu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_32, o->in1, o->in2, o->out);
}

static void cout_addu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDU_64, o->in1, o->in2, o->out);
}

static void cout_addc32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_32, o->in1, o->in2, o->out);
}

static void cout_addc64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_ADDC_64, o->in1, o->in2, o->out);
}

/* Signed/unsigned comparisons: CC from the two inputs only. */
static void cout_cmps32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_32, o->in1, o->in2);
}

static void cout_cmps64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTGT_64, o->in1, o->in2);
}

static void cout_cmpu32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_32, o->in1, o->in2);
}

static void cout_cmpu64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_LTUGTU_64, o->in1, o->in2);
}

/* Floating-point results: CC classified from the output value; the
   128-bit format needs both halves. */
static void cout_f32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F32, o->out);
}

static void cout_f64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ_F64, o->out);
}

static void cout_f128(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_NZ_F128, o->out, o->out2);
}

/* Negative-absolute-value and complement (negate) results. */
static void cout_nabs32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_32, o->out);
}

static void cout_nabs64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NABS_64, o->out);
}

static void cout_neg32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_32, o->out);
}

static void cout_neg64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_COMP_64, o->out);
}

/* CC_OP_NZ examines all 64 bits, so a 32-bit result must first be
   zero-extended into cc_dst. */
static void cout_nz32(DisasContext *s, DisasOps *o)
{
    tcg_gen_ext32u_i64(cc_dst, o->out);
    gen_op_update1_cc_i64(s, CC_OP_NZ, cc_dst);
}

static void cout_nz64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_NZ, o->out);
}

/* Sign of the result compared against zero. */
static void cout_s32(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_32, o->out);
}

static void cout_s64(DisasContext *s, DisasOps *o)
{
    gen_op_update1_cc_i64(s, CC_OP_LTGT0_64, o->out);
}

/* Subtract family: mirrors the add family above (signed, unsigned,
   with borrow). */
static void cout_subs32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_32, o->in1, o->in2, o->out);
}

static void cout_subs64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUB_64, o->in1, o->in2, o->out);
}

static void cout_subu32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_32, o->in1, o->in2, o->out);
}

static void cout_subu64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBU_64, o->in1, o->in2, o->out);
}

static void cout_subb32(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_32, o->in1, o->in2, o->out);
}

static void cout_subb64(DisasContext *s, DisasOps *o)
{
    gen_op_update3_cc_i64(s, CC_OP_SUBB_64, o->in1, o->in2, o->out);
}

/* Test-under-mask style ops: CC from the operand value and the mask. */
static void cout_tm32(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_32, o->in1, o->in2);
}

static void cout_tm64(DisasContext *s, DisasOps *o)
{
    gen_op_update2_cc_i64(s, CC_OP_TM_64, o->in1, o->in2);
}
5281 | |
5282 | /* ====================================================================== */ |
5283 | /* The "PREParation" generators. These initialize the DisasOps.OUT fields |
5284 | with the TCG register to which we will write. Used in combination with |
5285 | the "wout" generators, in some cases we need a new temporary, and in |
5286 | some cases we can write to a TCG global. */ |
5287 | |
/* Allocate a fresh temp for the output. */
static void prep_new(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
}
#define SPEC_prep_new 0

/* Allocate a fresh temp pair for a double-width output. */
static void prep_new_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = tcg_temp_new_i64();
    o->out2 = tcg_temp_new_i64();
}
#define SPEC_prep_new_P 0

/* Write directly into the r1 register global; g_out flags that OUT
   aliases a global rather than a private temp. */
static void prep_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = regs[get_field(f, r1)];
    o->g_out = true;
}
#define SPEC_prep_r1 0

/* Write directly into the even/odd register pair r1/r1+1. */
static void prep_r1_P(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->out = regs[r1];
    o->out2 = regs[r1 + 1];
    o->g_out = o->g_out2 = true;
}
#define SPEC_prep_r1_P SPEC_r1_even

/* Whenever we need x1 in addition to other inputs, we'll load it to out/out2 */
static void prep_x1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->out = load_freg(get_field(f, r1));
    o->out2 = load_freg(get_field(f, r1) + 2);
}
#define SPEC_prep_x1 SPEC_r1_f128
5324 | |
5325 | /* ====================================================================== */ |
5326 | /* The "Write OUTput" generators. These generally perform some non-trivial |
5327 | copy of data to TCG globals, or to main memory. The trivial cases are |
5328 | generally handled by having a "prep" generator install the TCG global |
5329 | as the destination of the operation. */ |
5330 | |
/* Store the full 64-bit result to r1. */
static void wout_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->out);
}
#define SPEC_wout_r1 0

/* Insert only the low 8 bits of the result into r1; the remaining bits
   of the register are preserved. */
static void wout_r1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 8);
}
#define SPEC_wout_r1_8 0

/* Likewise for the low 16 bits. */
static void wout_r1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    tcg_gen_deposit_i64(regs[r1], regs[r1], o->out, 0, 16);
}
#define SPEC_wout_r1_16 0

/* Store the low 32 bits of the result into the low half of r1. */
static void wout_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32 0

/* Store the low 32 bits of the result into the high half of r1. */
static void wout_r1_32h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32h_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_r1_32h 0

/* Two 32-bit results into the even/odd pair r1/r1+1. */
static void wout_r1_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1, o->out);
    store_reg32_i64(r1 + 1, o->out2);
}
#define SPEC_wout_r1_P32 SPEC_r1_even

/* Split one 64-bit result across the even/odd pair: low half to r1+1,
   high half to r1.  Note o->out is shifted in place (clobbered). */
static void wout_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    store_reg32_i64(r1 + 1, o->out);
    tcg_gen_shri_i64(o->out, o->out, 32);
    store_reg32_i64(r1, o->out);
}
#define SPEC_wout_r1_D32 SPEC_r1_even

/* As wout_r1_P32, but based at r3. */
static void wout_r3_P32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg32_i64(r3, o->out);
    store_reg32_i64(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P32 SPEC_r3_even

/* Two full 64-bit results into the even/odd pair r3/r3+1. */
static void wout_r3_P64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    store_reg(r3, o->out);
    store_reg(r3 + 1, o->out2);
}
#define SPEC_wout_r3_P64 SPEC_r3_even

/* 32-bit float result into register f1. */
static void wout_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg32_i64(get_field(f, r1), o->out);
}
#define SPEC_wout_e1 0

/* 64-bit float result into register f1. */
static void wout_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_freg(get_field(f, r1), o->out);
}
#define SPEC_wout_f1 0
5407 | |
5408 | static void wout_x1(DisasContext *s, DisasFields *f, DisasOps *o) |
5409 | { |
5410 | int f1 = get_field(s->fields, r1); |
5411 | store_freg(f1, o->out); |
5412 | store_freg(f1 + 2, o->out2); |
5413 | } |
5414 | #define SPEC_wout_x1 SPEC_r1_f128 |
5415 | |
/* Conditional write-back: store only when the r1 and r2 fields differ
   (when they are equal the store would be a redundant self-copy). */
static void wout_cond_r1r2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_reg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_r1r2_32 0

/* Same idea for a 32-bit float destination. */
static void wout_cond_e1e2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    if (get_field(f, r1) != get_field(f, r2)) {
        store_freg32_i64(get_field(f, r1), o->out);
    }
}
#define SPEC_wout_cond_e1e2 0

/* Store the result to memory at addr1, in the given width.  The "a"
   variants additionally require a naturally aligned address
   (MO_ALIGN). */
static void wout_m1_8(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st8(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_8 0

static void wout_m1_16(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st16(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_16 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_16a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUW | MO_ALIGN);
}
#define SPEC_wout_m1_16a 0
#endif

static void wout_m1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_32 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_32a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_tl(o->out, o->addr1, get_mem_index(s), MO_TEUL | MO_ALIGN);
}
#define SPEC_wout_m1_32a 0
#endif

static void wout_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st64(o->out, o->addr1, get_mem_index(s));
}
#define SPEC_wout_m1_64 0

#ifndef CONFIG_USER_ONLY
static void wout_m1_64a(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st_i64(o->out, o->addr1, get_mem_index(s), MO_TEQ | MO_ALIGN);
}
#define SPEC_wout_m1_64a 0
#endif

/* Store to memory at the address held in IN2. */
static void wout_m2_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    tcg_gen_qemu_st32(o->out, o->in2, get_mem_index(s));
}
#define SPEC_wout_m2_32 0

/* Write the (unmodified) second input to r1 -- used where the "result"
   is simply the second operand. */
static void wout_in2_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1 0

static void wout_in2_r1_32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    store_reg32_i64(get_field(f, r1), o->in2);
}
#define SPEC_wout_in2_r1_32 0
5497 | |
5498 | /* ====================================================================== */ |
5499 | /* The "INput 1" generators. These load the first operand to an insn. */ |
5500 | |
/* Copy of register r1. */
static void in1_r1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1));
}
#define SPEC_in1_r1 0

/* Alias the r1 global directly (no copy); g_in1 marks it so it is not
   treated as a private temp. */
static void in1_r1_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r1)];
    o->g_in1 = true;
}
#define SPEC_in1_r1_o 0

/* Low 32 bits of r1, sign-extended. */
static void in1_r1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32s 0

/* Low 32 bits of r1, zero-extended. */
static void in1_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1)]);
}
#define SPEC_in1_r1_32u 0

/* High 32 bits of r1 (shift right 32). */
static void in1_r1_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r1)], 32);
}
#define SPEC_in1_r1_sr32 0

/* Odd register of the even/odd pair r1/r1+1, with extension variants. */
static void in1_r1p1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r1) + 1);
}
#define SPEC_in1_r1p1 SPEC_r1_even

static void in1_r1p1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32s SPEC_r1_even

static void in1_r1p1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r1) + 1]);
}
#define SPEC_in1_r1p1_32u SPEC_r1_even

/* 64-bit value formed from the low halves of the pair: r1 supplies the
   high 32 bits, r1+1 the low 32 bits. */
static void in1_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r1 = get_field(f, r1);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r1 + 1], regs[r1]);
}
#define SPEC_in1_r1_D32 SPEC_r1_even

/* First operand taken from r2 instead of r1. */
static void in1_r2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r2));
}
#define SPEC_in1_r2 0

/* High 32 bits of r2. */
static void in1_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_shri_i64(o->in1, regs[get_field(f, r2)], 32);
}
#define SPEC_in1_r2_sr32 0

/* First operand taken from r3. */
static void in1_r3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_reg(get_field(f, r3));
}
#define SPEC_in1_r3 0

/* Alias the r3 global directly (no copy). */
static void in1_r3_o(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = regs[get_field(f, r3)];
    o->g_in1 = true;
}
#define SPEC_in1_r3_o 0

static void in1_r3_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32s_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32s 0

static void in1_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = tcg_temp_new_i64();
    tcg_gen_ext32u_i64(o->in1, regs[get_field(f, r3)]);
}
#define SPEC_in1_r3_32u 0

/* 64-bit value from the low halves of the r3/r3+1 pair. */
static void in1_r3_D32(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int r3 = get_field(f, r3);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_concat32_i64(o->in1, regs[r3 + 1], regs[r3]);
}
#define SPEC_in1_r3_D32 SPEC_r3_even

/* 32-bit float operand from register f1. */
static void in1_e1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg32_i64(get_field(f, r1));
}
#define SPEC_in1_e1 0

/* 64-bit float operand from register f1. */
static void in1_f1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r1));
}
#define SPEC_in1_f1 0

/* Load the high double word of an extended (128-bit) format FP number */
static void in1_x2h(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r2));
}
#define SPEC_in1_x2h SPEC_r2_f128

/* 64-bit float operand from register f3. */
static void in1_f3(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in1 = load_freg(get_field(f, r3));
}
#define SPEC_in1_f3 0

/* Compute the first-operand effective address (b1+d1) into addr1;
   no memory access is performed here. */
static void in1_la1(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->addr1 = get_address(s, 0, get_field(f, b1), get_field(f, d1));
}
#define SPEC_in1_la1 0

/* As above but from the second-operand fields (x2+b2+d2). */
static void in1_la2(DisasContext *s, DisasFields *f, DisasOps *o)
{
    int x2 = have_field(f, x2) ? get_field(f, x2) : 0;
    o->addr1 = get_address(s, x2, get_field(f, b2), get_field(f, d2));
}
#define SPEC_in1_la2 0

/* Memory operands: compute addr1, then load with the given width and
   extension into a fresh temp. */
static void in1_m1_8u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld8u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_8u 0

static void in1_m1_16s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16s 0

static void in1_m1_16u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld16u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_16u 0

static void in1_m1_32s(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32s(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32s 0

static void in1_m1_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld32u(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_32u 0

static void in1_m1_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in1_la1(s, f, o);
    o->in1 = tcg_temp_new_i64();
    tcg_gen_qemu_ld64(o->in1, o->addr1, get_mem_index(s));
}
#define SPEC_in1_m1_64 0
5696 | |
5697 | /* ====================================================================== */ |
5698 | /* The "INput 2" generators. These load the second operand to an insn. */ |
5699 | |
5700 | static void in2_r1_o(DisasContext *s, DisasFields *f, DisasOps *o) |
5701 | { |
5702 | o->in2 = regs[get_field(f, r1)]; |
5703 | o->g_in2 = true; |
5704 | } |
5705 | #define SPEC_in2_r1_o 0 |
5706 | |
5707 | static void in2_r1_16u(DisasContext *s, DisasFields *f, DisasOps *o) |
5708 | { |
5709 | o->in2 = tcg_temp_new_i64(); |
5710 | tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r1)]); |
5711 | } |
5712 | #define SPEC_in2_r1_16u 0 |
5713 | |
5714 | static void in2_r1_32u(DisasContext *s, DisasFields *f, DisasOps *o) |
5715 | { |
5716 | o->in2 = tcg_temp_new_i64(); |
5717 | tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r1)]); |
5718 | } |
5719 | #define SPEC_in2_r1_32u 0 |
5720 | |
5721 | static void in2_r1_D32(DisasContext *s, DisasFields *f, DisasOps *o) |
5722 | { |
5723 | int r1 = get_field(f, r1); |
5724 | o->in2 = tcg_temp_new_i64(); |
5725 | tcg_gen_concat32_i64(o->in2, regs[r1 + 1], regs[r1]); |
5726 | } |
5727 | #define SPEC_in2_r1_D32 SPEC_r1_even |
5728 | |
5729 | static void in2_r2(DisasContext *s, DisasFields *f, DisasOps *o) |
5730 | { |
5731 | o->in2 = load_reg(get_field(f, r2)); |
5732 | } |
5733 | #define SPEC_in2_r2 0 |
5734 | |
5735 | static void in2_r2_o(DisasContext *s, DisasFields *f, DisasOps *o) |
5736 | { |
5737 | o->in2 = regs[get_field(f, r2)]; |
5738 | o->g_in2 = true; |
5739 | } |
5740 | #define SPEC_in2_r2_o 0 |
5741 | |
5742 | static void in2_r2_nz(DisasContext *s, DisasFields *f, DisasOps *o) |
5743 | { |
5744 | int r2 = get_field(f, r2); |
5745 | if (r2 != 0) { |
5746 | o->in2 = load_reg(r2); |
5747 | } |
5748 | } |
5749 | #define SPEC_in2_r2_nz 0 |
5750 | |
5751 | static void in2_r2_8s(DisasContext *s, DisasFields *f, DisasOps *o) |
5752 | { |
5753 | o->in2 = tcg_temp_new_i64(); |
5754 | tcg_gen_ext8s_i64(o->in2, regs[get_field(f, r2)]); |
5755 | } |
5756 | #define SPEC_in2_r2_8s 0 |
5757 | |
5758 | static void in2_r2_8u(DisasContext *s, DisasFields *f, DisasOps *o) |
5759 | { |
5760 | o->in2 = tcg_temp_new_i64(); |
5761 | tcg_gen_ext8u_i64(o->in2, regs[get_field(f, r2)]); |
5762 | } |
5763 | #define SPEC_in2_r2_8u 0 |
5764 | |
5765 | static void in2_r2_16s(DisasContext *s, DisasFields *f, DisasOps *o) |
5766 | { |
5767 | o->in2 = tcg_temp_new_i64(); |
5768 | tcg_gen_ext16s_i64(o->in2, regs[get_field(f, r2)]); |
5769 | } |
5770 | #define SPEC_in2_r2_16s 0 |
5771 | |
5772 | static void in2_r2_16u(DisasContext *s, DisasFields *f, DisasOps *o) |
5773 | { |
5774 | o->in2 = tcg_temp_new_i64(); |
5775 | tcg_gen_ext16u_i64(o->in2, regs[get_field(f, r2)]); |
5776 | } |
5777 | #define SPEC_in2_r2_16u 0 |
5778 | |
5779 | static void in2_r3(DisasContext *s, DisasFields *f, DisasOps *o) |
5780 | { |
5781 | o->in2 = load_reg(get_field(f, r3)); |
5782 | } |
5783 | #define SPEC_in2_r3 0 |
5784 | |
5785 | static void in2_r3_sr32(DisasContext *s, DisasFields *f, DisasOps *o) |
5786 | { |
5787 | o->in2 = tcg_temp_new_i64(); |
5788 | tcg_gen_shri_i64(o->in2, regs[get_field(f, r3)], 32); |
5789 | } |
5790 | #define SPEC_in2_r3_sr32 0 |
5791 | |
5792 | static void in2_r3_32u(DisasContext *s, DisasFields *f, DisasOps *o) |
5793 | { |
5794 | o->in2 = tcg_temp_new_i64(); |
5795 | tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r3)]); |
5796 | } |
5797 | #define SPEC_in2_r3_32u 0 |
5798 | |
5799 | static void in2_r2_32s(DisasContext *s, DisasFields *f, DisasOps *o) |
5800 | { |
5801 | o->in2 = tcg_temp_new_i64(); |
5802 | tcg_gen_ext32s_i64(o->in2, regs[get_field(f, r2)]); |
5803 | } |
5804 | #define SPEC_in2_r2_32s 0 |
5805 | |
5806 | static void in2_r2_32u(DisasContext *s, DisasFields *f, DisasOps *o) |
5807 | { |
5808 | o->in2 = tcg_temp_new_i64(); |
5809 | tcg_gen_ext32u_i64(o->in2, regs[get_field(f, r2)]); |
5810 | } |
5811 | #define SPEC_in2_r2_32u 0 |
5812 | |
5813 | static void in2_r2_sr32(DisasContext *s, DisasFields *f, DisasOps *o) |
5814 | { |
5815 | o->in2 = tcg_temp_new_i64(); |
5816 | tcg_gen_shri_i64(o->in2, regs[get_field(f, r2)], 32); |
5817 | } |
5818 | #define SPEC_in2_r2_sr32 0 |
5819 | |
5820 | static void in2_e2(DisasContext *s, DisasFields *f, DisasOps *o) |
5821 | { |
5822 | o->in2 = load_freg32_i64(get_field(f, r2)); |
5823 | } |
5824 | #define SPEC_in2_e2 0 |
5825 | |
5826 | static void in2_f2(DisasContext *s, DisasFields *f, DisasOps *o) |
5827 | { |
5828 | o->in2 = load_freg(get_field(f, r2)); |
5829 | } |
5830 | #define SPEC_in2_f2 0 |
5831 | |
5832 | /* Load the low double word of an extended (128-bit) format FP number */ |
5833 | static void in2_x2l(DisasContext *s, DisasFields *f, DisasOps *o) |
5834 | { |
5835 | o->in2 = load_freg(get_field(f, r2) + 2); |
5836 | } |
5837 | #define SPEC_in2_x2l SPEC_r2_f128 |
5838 | |
5839 | static void in2_ra2(DisasContext *s, DisasFields *f, DisasOps *o) |
5840 | { |
5841 | o->in2 = get_address(s, 0, get_field(f, r2), 0); |
5842 | } |
5843 | #define SPEC_in2_ra2 0 |
5844 | |
5845 | static void in2_a2(DisasContext *s, DisasFields *f, DisasOps *o) |
5846 | { |
5847 | int x2 = have_field(f, x2) ? get_field(f, x2) : 0; |
5848 | o->in2 = get_address(s, x2, get_field(f, b2), get_field(f, d2)); |
5849 | } |
5850 | #define SPEC_in2_a2 0 |
5851 | |
5852 | static void in2_ri2(DisasContext *s, DisasFields *f, DisasOps *o) |
5853 | { |
5854 | o->in2 = tcg_const_i64(s->base.pc_next + (int64_t)get_field(f, i2) * 2); |
5855 | } |
5856 | #define SPEC_in2_ri2 0 |
5857 | |
5858 | static void in2_sh32(DisasContext *s, DisasFields *f, DisasOps *o) |
5859 | { |
5860 | help_l2_shift(s, f, o, 31); |
5861 | } |
5862 | #define SPEC_in2_sh32 0 |
5863 | |
5864 | static void in2_sh64(DisasContext *s, DisasFields *f, DisasOps *o) |
5865 | { |
5866 | help_l2_shift(s, f, o, 63); |
5867 | } |
5868 | #define SPEC_in2_sh64 0 |
5869 | |
5870 | static void in2_m2_8u(DisasContext *s, DisasFields *f, DisasOps *o) |
5871 | { |
5872 | in2_a2(s, f, o); |
5873 | tcg_gen_qemu_ld8u(o->in2, o->in2, get_mem_index(s)); |
5874 | } |
5875 | #define SPEC_in2_m2_8u 0 |
5876 | |
5877 | static void in2_m2_16s(DisasContext *s, DisasFields *f, DisasOps *o) |
5878 | { |
5879 | in2_a2(s, f, o); |
5880 | tcg_gen_qemu_ld16s(o->in2, o->in2, get_mem_index(s)); |
5881 | } |
5882 | #define SPEC_in2_m2_16s 0 |
5883 | |
5884 | static void in2_m2_16u(DisasContext *s, DisasFields *f, DisasOps *o) |
5885 | { |
5886 | in2_a2(s, f, o); |
5887 | tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); |
5888 | } |
5889 | #define SPEC_in2_m2_16u 0 |
5890 | |
5891 | static void in2_m2_32s(DisasContext *s, DisasFields *f, DisasOps *o) |
5892 | { |
5893 | in2_a2(s, f, o); |
5894 | tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); |
5895 | } |
5896 | #define SPEC_in2_m2_32s 0 |
5897 | |
5898 | static void in2_m2_32u(DisasContext *s, DisasFields *f, DisasOps *o) |
5899 | { |
5900 | in2_a2(s, f, o); |
5901 | tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s)); |
5902 | } |
5903 | #define SPEC_in2_m2_32u 0 |
5904 | |
5905 | #ifndef CONFIG_USER_ONLY |
5906 | static void in2_m2_32ua(DisasContext *s, DisasFields *f, DisasOps *o) |
5907 | { |
5908 | in2_a2(s, f, o); |
5909 | tcg_gen_qemu_ld_tl(o->in2, o->in2, get_mem_index(s), MO_TEUL | MO_ALIGN); |
5910 | } |
5911 | #define SPEC_in2_m2_32ua 0 |
5912 | #endif |
5913 | |
5914 | static void in2_m2_64(DisasContext *s, DisasFields *f, DisasOps *o) |
5915 | { |
5916 | in2_a2(s, f, o); |
5917 | tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s)); |
5918 | } |
5919 | #define SPEC_in2_m2_64 0 |
5920 | |
5921 | #ifndef CONFIG_USER_ONLY |
5922 | static void in2_m2_64a(DisasContext *s, DisasFields *f, DisasOps *o) |
5923 | { |
5924 | in2_a2(s, f, o); |
5925 | tcg_gen_qemu_ld_i64(o->in2, o->in2, get_mem_index(s), MO_TEQ | MO_ALIGN); |
5926 | } |
5927 | #define SPEC_in2_m2_64a 0 |
5928 | #endif |
5929 | |
5930 | static void in2_mri2_16u(DisasContext *s, DisasFields *f, DisasOps *o) |
5931 | { |
5932 | in2_ri2(s, f, o); |
5933 | tcg_gen_qemu_ld16u(o->in2, o->in2, get_mem_index(s)); |
5934 | } |
5935 | #define SPEC_in2_mri2_16u 0 |
5936 | |
5937 | static void in2_mri2_32s(DisasContext *s, DisasFields *f, DisasOps *o) |
5938 | { |
5939 | in2_ri2(s, f, o); |
5940 | tcg_gen_qemu_ld32s(o->in2, o->in2, get_mem_index(s)); |
5941 | } |
5942 | #define SPEC_in2_mri2_32s 0 |
5943 | |
/* PC-relative storage operand, zero-extended 32-bit load.  */
static void in2_mri2_32u(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld32u(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_32u 0
5950 | |
/* PC-relative storage operand, 64-bit load.  */
static void in2_mri2_64(DisasContext *s, DisasFields *f, DisasOps *o)
{
    in2_ri2(s, f, o);
    tcg_gen_qemu_ld64(o->in2, o->in2, get_mem_index(s));
}
#define SPEC_in2_mri2_64 0
5957 | |
5958 | static void in2_i2(DisasContext *s, DisasFields *f, DisasOps *o) |
5959 | { |
5960 | o->in2 = tcg_const_i64(get_field(f, i2)); |
5961 | } |
5962 | #define SPEC_in2_i2 0 |
5963 | |
5964 | static void in2_i2_8u(DisasContext *s, DisasFields *f, DisasOps *o) |
5965 | { |
5966 | o->in2 = tcg_const_i64((uint8_t)get_field(f, i2)); |
5967 | } |
5968 | #define SPEC_in2_i2_8u 0 |
5969 | |
5970 | static void in2_i2_16u(DisasContext *s, DisasFields *f, DisasOps *o) |
5971 | { |
5972 | o->in2 = tcg_const_i64((uint16_t)get_field(f, i2)); |
5973 | } |
5974 | #define SPEC_in2_i2_16u 0 |
5975 | |
5976 | static void in2_i2_32u(DisasContext *s, DisasFields *f, DisasOps *o) |
5977 | { |
5978 | o->in2 = tcg_const_i64((uint32_t)get_field(f, i2)); |
5979 | } |
5980 | #define SPEC_in2_i2_32u 0 |
5981 | |
5982 | static void in2_i2_16u_shl(DisasContext *s, DisasFields *f, DisasOps *o) |
5983 | { |
5984 | uint64_t i2 = (uint16_t)get_field(f, i2); |
5985 | o->in2 = tcg_const_i64(i2 << s->insn->data); |
5986 | } |
5987 | #define SPEC_in2_i2_16u_shl 0 |
5988 | |
5989 | static void in2_i2_32u_shl(DisasContext *s, DisasFields *f, DisasOps *o) |
5990 | { |
5991 | uint64_t i2 = (uint32_t)get_field(f, i2); |
5992 | o->in2 = tcg_const_i64(i2 << s->insn->data); |
5993 | } |
5994 | #define SPEC_in2_i2_32u_shl 0 |
5995 | |
#ifndef CONFIG_USER_ONLY
/* Load in2 with the raw, left-aligned instruction image saved in
   s->fields->raw_insn; system-mode only.  */
static void in2_insn(DisasContext *s, DisasFields *f, DisasOps *o)
{
    o->in2 = tcg_const_i64(s->fields->raw_insn);
}
#define SPEC_in2_insn 0
#endif
6003 | |
6004 | /* ====================================================================== */ |
6005 | |
6006 | /* Find opc within the table of insns. This is formulated as a switch |
6007 | statement so that (1) we get compile-time notice of cut-paste errors |
6008 | for duplicated opcodes, and (2) the compiler generates the binary |
6009 | search tree, rather than us having to post-process the table. */ |
6010 | |
/* Convenience wrappers around E: C supplies neither data nor flags,
   D supplies only per-insn data, F supplies only flags.  */
#define C(OPC, NM, FT, FC, I1, I2, P, W, OP, CC) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, 0)

#define D(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, 0)

#define F(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, FL) \
    E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, 0, FL)

/* First expansion of insn-data.def: one enumerator per insn name,
   used below to index insn_info[].  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) insn_ ## NM,

enum DisasInsnEnum {
#include "insn-data.def"
};
6025 | |
#undef E
/* Second expansion: build the DisasInsn descriptor for one insn, wiring
   up the operand-load/prep/writeback/cc helper functions by name.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) { \
    .opc = OPC, \
    .flags = FL, \
    .fmt = FMT_##FT, \
    .fac = FAC_##FC, \
    .spec = SPEC_in1_##I1 | SPEC_in2_##I2 | SPEC_prep_##P | SPEC_wout_##W, \
    .name = #NM, \
    .help_in1 = in1_##I1, \
    .help_in2 = in2_##I2, \
    .help_prep = prep_##P, \
    .help_wout = wout_##W, \
    .help_cout = cout_##CC, \
    .help_op = op_##OP, \
    .data = D \
},

/* Allow 0 to be used for NULL in the table below. */
#define in1_0 NULL
#define in2_0 NULL
#define prep_0 NULL
#define wout_0 NULL
#define cout_0 NULL
#define op_0 NULL

/* The SPEC_* counterparts of the NULL helpers contribute no checks. */
#define SPEC_in1_0 0
#define SPEC_in2_0 0
#define SPEC_prep_0 0
#define SPEC_wout_0 0
6055 | |
/* Give smaller names to the various facilities. */
#define FAC_Z S390_FEAT_ZARCH
#define FAC_CASS S390_FEAT_COMPARE_AND_SWAP_AND_STORE
#define FAC_DFP S390_FEAT_DFP
#define FAC_DFPR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* DFP-rounding */
#define FAC_DO S390_FEAT_STFLE_45 /* distinct-operands */
#define FAC_EE S390_FEAT_EXECUTE_EXT
#define FAC_EI S390_FEAT_EXTENDED_IMMEDIATE
#define FAC_FPE S390_FEAT_FLOATING_POINT_EXT
#define FAC_FPSSH S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPS-sign-handling */
#define FAC_FPRGR S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* FPR-GR-transfer */
#define FAC_GIE S390_FEAT_GENERAL_INSTRUCTIONS_EXT
#define FAC_HFP_MA S390_FEAT_HFP_MADDSUB
#define FAC_HW S390_FEAT_STFLE_45 /* high-word */
#define FAC_IEEEE_SIM S390_FEAT_FLOATING_POINT_SUPPPORT_ENH /* IEEE-exception-simulation */
#define FAC_MIE S390_FEAT_STFLE_49 /* misc-instruction-extensions */
#define FAC_LAT S390_FEAT_STFLE_49 /* load-and-trap */
#define FAC_LOC S390_FEAT_STFLE_45 /* load/store on condition 1 */
#define FAC_LOC2 S390_FEAT_STFLE_53 /* load/store on condition 2 */
#define FAC_LD S390_FEAT_LONG_DISPLACEMENT
#define FAC_PC S390_FEAT_STFLE_45 /* population count */
#define FAC_SCF S390_FEAT_STORE_CLOCK_FAST
#define FAC_SFLE S390_FEAT_STFLE
#define FAC_ILA S390_FEAT_STFLE_45 /* interlocked-access-facility 1 */
#define FAC_MVCOS S390_FEAT_MOVE_WITH_OPTIONAL_SPEC
#define FAC_LPP S390_FEAT_SET_PROGRAM_PARAMETERS /* load-program-parameter */
#define FAC_DAT_ENH S390_FEAT_DAT_ENH
#define FAC_E2 S390_FEAT_EXTENDED_TRANSLATION_2
#define FAC_EH S390_FEAT_STFLE_49 /* execution-hint */
#define FAC_PPA S390_FEAT_STFLE_49 /* processor-assist */
#define FAC_LZRB S390_FEAT_STFLE_53 /* load-and-zero-rightmost-byte */
#define FAC_ETF3 S390_FEAT_EXTENDED_TRANSLATION_3
#define FAC_MSA S390_FEAT_MSA /* message-security-assist facility */
#define FAC_MSA3 S390_FEAT_MSA_EXT_3 /* msa-extension-3 facility */
#define FAC_MSA4 S390_FEAT_MSA_EXT_4 /* msa-extension-4 facility */
#define FAC_MSA5 S390_FEAT_MSA_EXT_5 /* msa-extension-5 facility */
#define FAC_ECT S390_FEAT_EXTRACT_CPU_TIME
#define FAC_PCI S390_FEAT_ZPCI /* z/PCI facility */
#define FAC_AIS S390_FEAT_ADAPTER_INT_SUPPRESSION
#define FAC_V S390_FEAT_VECTOR /* vector facility */
#define FAC_VE S390_FEAT_VECTOR_ENH /* vector enhancements facility 1 */

/* Second expansion of insn-data.def: the actual insn descriptor table. */
static const DisasInsn insn_info[] = {
#include "insn-data.def"
};
6101 | |
#undef E
/* Third expansion: map each opcode to its insn_info[] entry.  */
#define E(OPC, NM, FT, FC, I1, I2, P, W, OP, CC, D, FL) \
    case OPC: return &insn_info[insn_ ## NM];

/* Look up the descriptor for OPC ((primary << 8) | secondary opcode).
   Returns NULL for unknown opcodes; the switch lets the compiler both
   detect duplicate opcodes and generate the search itself.  */
static const DisasInsn *lookup_opc(uint16_t opc)
{
    switch (opc) {
#include "insn-data.def"
    default:
        return NULL;
    }
}

#undef F
#undef E
#undef D
#undef C
6119 | |
6120 | /* Extract a field from the insn. The INSN should be left-aligned in |
6121 | the uint64_t so that we can more easily utilize the big-bit-endian |
6122 | definitions we extract from the Principals of Operation. */ |
6123 | |
6124 | static void (DisasFields *o, const DisasField *f, uint64_t insn) |
6125 | { |
6126 | uint32_t r, m; |
6127 | |
6128 | if (f->size == 0) { |
6129 | return; |
6130 | } |
6131 | |
6132 | /* Zero extract the field from the insn. */ |
6133 | r = (insn << f->beg) >> (64 - f->size); |
6134 | |
6135 | /* Sign-extend, or un-swap the field as necessary. */ |
6136 | switch (f->type) { |
6137 | case 0: /* unsigned */ |
6138 | break; |
6139 | case 1: /* signed */ |
6140 | assert(f->size <= 32); |
6141 | m = 1u << (f->size - 1); |
6142 | r = (r ^ m) - m; |
6143 | break; |
6144 | case 2: /* dl+dh split, signed 20 bit. */ |
6145 | r = ((int8_t)r << 12) | (r >> 8); |
6146 | break; |
6147 | case 3: /* MSB stored in RXB */ |
6148 | g_assert(f->size == 4); |
6149 | switch (f->beg) { |
6150 | case 8: |
6151 | r |= extract64(insn, 63 - 36, 1) << 4; |
6152 | break; |
6153 | case 12: |
6154 | r |= extract64(insn, 63 - 37, 1) << 4; |
6155 | break; |
6156 | case 16: |
6157 | r |= extract64(insn, 63 - 38, 1) << 4; |
6158 | break; |
6159 | case 32: |
6160 | r |= extract64(insn, 63 - 39, 1) << 4; |
6161 | break; |
6162 | default: |
6163 | g_assert_not_reached(); |
6164 | } |
6165 | break; |
6166 | default: |
6167 | abort(); |
6168 | } |
6169 | |
6170 | /* Validate that the "compressed" encoding we selected above is valid. |
6171 | I.e. we havn't make two different original fields overlap. */ |
6172 | assert(((o->presentC >> f->indexC) & 1) == 0); |
6173 | o->presentC |= 1 << f->indexC; |
6174 | o->presentO |= 1 << f->indexO; |
6175 | |
6176 | o->c[f->indexC] = r; |
6177 | } |
6178 | |
6179 | /* Lookup the insn at the current PC, extracting the operands into O and |
6180 | returning the info struct for the insn. Returns NULL for invalid insn. */ |
6181 | |
6182 | static const DisasInsn *(CPUS390XState *env, DisasContext *s, |
6183 | DisasFields *f) |
6184 | { |
6185 | uint64_t insn, pc = s->base.pc_next; |
6186 | int op, op2, ilen; |
6187 | const DisasInsn *info; |
6188 | |
6189 | if (unlikely(s->ex_value)) { |
6190 | /* Drop the EX data now, so that it's clear on exception paths. */ |
6191 | TCGv_i64 zero = tcg_const_i64(0); |
6192 | tcg_gen_st_i64(zero, cpu_env, offsetof(CPUS390XState, ex_value)); |
6193 | tcg_temp_free_i64(zero); |
6194 | |
6195 | /* Extract the values saved by EXECUTE. */ |
6196 | insn = s->ex_value & 0xffffffffffff0000ull; |
6197 | ilen = s->ex_value & 0xf; |
6198 | op = insn >> 56; |
6199 | } else { |
6200 | insn = ld_code2(env, pc); |
6201 | op = (insn >> 8) & 0xff; |
6202 | ilen = get_ilen(op); |
6203 | switch (ilen) { |
6204 | case 2: |
6205 | insn = insn << 48; |
6206 | break; |
6207 | case 4: |
6208 | insn = ld_code4(env, pc) << 32; |
6209 | break; |
6210 | case 6: |
6211 | insn = (insn << 48) | (ld_code4(env, pc + 2) << 16); |
6212 | break; |
6213 | default: |
6214 | g_assert_not_reached(); |
6215 | } |
6216 | } |
6217 | s->pc_tmp = s->base.pc_next + ilen; |
6218 | s->ilen = ilen; |
6219 | |
6220 | /* We can't actually determine the insn format until we've looked up |
6221 | the full insn opcode. Which we can't do without locating the |
6222 | secondary opcode. Assume by default that OP2 is at bit 40; for |
6223 | those smaller insns that don't actually have a secondary opcode |
6224 | this will correctly result in OP2 = 0. */ |
6225 | switch (op) { |
6226 | case 0x01: /* E */ |
6227 | case 0x80: /* S */ |
6228 | case 0x82: /* S */ |
6229 | case 0x93: /* S */ |
6230 | case 0xb2: /* S, RRF, RRE, IE */ |
6231 | case 0xb3: /* RRE, RRD, RRF */ |
6232 | case 0xb9: /* RRE, RRF */ |
6233 | case 0xe5: /* SSE, SIL */ |
6234 | op2 = (insn << 8) >> 56; |
6235 | break; |
6236 | case 0xa5: /* RI */ |
6237 | case 0xa7: /* RI */ |
6238 | case 0xc0: /* RIL */ |
6239 | case 0xc2: /* RIL */ |
6240 | case 0xc4: /* RIL */ |
6241 | case 0xc6: /* RIL */ |
6242 | case 0xc8: /* SSF */ |
6243 | case 0xcc: /* RIL */ |
6244 | op2 = (insn << 12) >> 60; |
6245 | break; |
6246 | case 0xc5: /* MII */ |
6247 | case 0xc7: /* SMI */ |
6248 | case 0xd0 ... 0xdf: /* SS */ |
6249 | case 0xe1: /* SS */ |
6250 | case 0xe2: /* SS */ |
6251 | case 0xe8: /* SS */ |
6252 | case 0xe9: /* SS */ |
6253 | case 0xea: /* SS */ |
6254 | case 0xee ... 0xf3: /* SS */ |
6255 | case 0xf8 ... 0xfd: /* SS */ |
6256 | op2 = 0; |
6257 | break; |
6258 | default: |
6259 | op2 = (insn << 40) >> 56; |
6260 | break; |
6261 | } |
6262 | |
6263 | memset(f, 0, sizeof(*f)); |
6264 | f->raw_insn = insn; |
6265 | f->op = op; |
6266 | f->op2 = op2; |
6267 | |
6268 | /* Lookup the instruction. */ |
6269 | info = lookup_opc(op << 8 | op2); |
6270 | |
6271 | /* If we found it, extract the operands. */ |
6272 | if (info != NULL) { |
6273 | DisasFormat fmt = info->fmt; |
6274 | int i; |
6275 | |
6276 | for (i = 0; i < NUM_C_FIELD; ++i) { |
6277 | extract_field(f, &format_info[fmt].op[i], insn); |
6278 | } |
6279 | } |
6280 | return info; |
6281 | } |
6282 | |
/* Return true if REG requires the additional-floating-point facility:
   the basic FP registers are only 0, 2, 4 and 6, so anything odd or
   above 6 is an AFP register.  */
static bool is_afp_reg(int reg)
{
    if ((reg & 1) != 0) {
        return true;
    }
    return reg > 6;
}
6287 | |
/* Return true if REG can name a 128-bit FP register pair.
   Valid pair registers are 0,1,4,5,8,9,12,13 -- exactly the registers
   with bit 1 (value 2) clear.  */
static bool is_fp_pair(int reg)
{
    return (reg & 0x2) == 0;
}
6293 | |
/* Translate the single instruction at s->base.pc_next: decode it, run the
   facility/AFP/vector/specification checks, then drive the per-insn helper
   pipeline (in1, in2, prep, op, wout, cout).  Returns the resulting
   DisasJumpType and advances s->base.pc_next past the insn.  */
static DisasJumpType translate_one(CPUS390XState *env, DisasContext *s)
{
    const DisasInsn *insn;
    DisasJumpType ret = DISAS_NEXT;
    DisasFields f;
    DisasOps o = {};

    /* Search for the insn in the table. */
    insn = extract_insn(env, s, &f);

    /* Not found means unimplemented/illegal opcode. */
    if (insn == NULL) {
        qemu_log_mask(LOG_UNIMP, "unimplemented opcode 0x%02x%02x\n" ,
                      f.op, f.op2);
        gen_illegal_opcode(s);
        return DISAS_NORETURN;
    }

#ifndef CONFIG_USER_ONLY
    /* With PER tracing enabled, let the helper see every insn fetch. */
    if (s->base.tb->flags & FLAG_MASK_PER) {
        TCGv_i64 addr = tcg_const_i64(s->base.pc_next);
        gen_helper_per_ifetch(cpu_env, addr);
        tcg_temp_free_i64(addr);
    }
#endif

    /* process flags */
    if (insn->flags) {
        /* privileged instruction */
        if ((s->base.tb->flags & FLAG_MASK_PSTATE) && (insn->flags & IF_PRIV)) {
            gen_program_exception(s, PGM_PRIVILEGED);
            return DISAS_NORETURN;
        }

        /* if AFP is not enabled, instructions and registers are forbidden */
        if (!(s->base.tb->flags & FLAG_MASK_AFP)) {
            uint8_t dxc = 0;

            /* dxc selects the data-exception code to raise, if any. */
            if ((insn->flags & IF_AFP1) && is_afp_reg(get_field(&f, r1))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP2) && is_afp_reg(get_field(&f, r2))) {
                dxc = 1;
            }
            if ((insn->flags & IF_AFP3) && is_afp_reg(get_field(&f, r3))) {
                dxc = 1;
            }
            if (insn->flags & IF_BFP) {
                dxc = 2;
            }
            if (insn->flags & IF_DFP) {
                dxc = 3;
            }
            if (insn->flags & IF_VEC) {
                dxc = 0xfe;
            }
            if (dxc) {
                gen_data_exception(dxc);
                return DISAS_NORETURN;
            }
        }

        /* if vector instructions not enabled, executing them is forbidden */
        if (insn->flags & IF_VEC) {
            if (!((s->base.tb->flags & FLAG_MASK_VECTOR))) {
                gen_data_exception(0xfe);
                return DISAS_NORETURN;
            }
        }
    }

    /* Check for insn specification exceptions. */
    if (insn->spec) {
        if ((insn->spec & SPEC_r1_even && get_field(&f, r1) & 1) ||
            (insn->spec & SPEC_r2_even && get_field(&f, r2) & 1) ||
            (insn->spec & SPEC_r3_even && get_field(&f, r3) & 1) ||
            (insn->spec & SPEC_r1_f128 && !is_fp_pair(get_field(&f, r1))) ||
            (insn->spec & SPEC_r2_f128 && !is_fp_pair(get_field(&f, r2)))) {
            gen_program_exception(s, PGM_SPECIFICATION);
            return DISAS_NORETURN;
        }
    }

    /* Set up the structures we use to communicate with the helpers. */
    s->insn = insn;
    s->fields = &f;

    /* Implement the instruction: each stage is optional per-insn. */
    if (insn->help_in1) {
        insn->help_in1(s, &f, &o);
    }
    if (insn->help_in2) {
        insn->help_in2(s, &f, &o);
    }
    if (insn->help_prep) {
        insn->help_prep(s, &f, &o);
    }
    if (insn->help_op) {
        ret = insn->help_op(s, &o);
    }
    /* Skip writeback and cc computation if the op already ended the TB. */
    if (ret != DISAS_NORETURN) {
        if (insn->help_wout) {
            insn->help_wout(s, &f, &o);
        }
        if (insn->help_cout) {
            insn->help_cout(s, &o);
        }
    }

    /* Free any temporaries created by the helpers.  The g_* flags mark
       operands that alias globals and must not be freed.  */
    if (o.out && !o.g_out) {
        tcg_temp_free_i64(o.out);
    }
    if (o.out2 && !o.g_out2) {
        tcg_temp_free_i64(o.out2);
    }
    if (o.in1 && !o.g_in1) {
        tcg_temp_free_i64(o.in1);
    }
    if (o.in2 && !o.g_in2) {
        tcg_temp_free_i64(o.in2);
    }
    if (o.addr1) {
        tcg_temp_free_i64(o.addr1);
    }

#ifndef CONFIG_USER_ONLY
    if (s->base.tb->flags & FLAG_MASK_PER) {
        /* An exception might be triggered, save PSW if not already done. */
        if (ret == DISAS_NEXT || ret == DISAS_PC_STALE) {
            tcg_gen_movi_i64(psw_addr, s->pc_tmp);
        }

        /* Call the helper to check for a possible PER exception. */
        gen_helper_per_check_exception(cpu_env);
    }
#endif

    /* Advance to the next instruction. */
    s->base.pc_next = s->pc_tmp;
    return ret;
}
6436 | |
6437 | static void s390x_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) |
6438 | { |
6439 | DisasContext *dc = container_of(dcbase, DisasContext, base); |
6440 | |
6441 | /* 31-bit mode */ |
6442 | if (!(dc->base.tb->flags & FLAG_MASK_64)) { |
6443 | dc->base.pc_first &= 0x7fffffff; |
6444 | dc->base.pc_next = dc->base.pc_first; |
6445 | } |
6446 | |
6447 | dc->cc_op = CC_OP_DYNAMIC; |
6448 | dc->ex_value = dc->base.tb->cs_base; |
6449 | dc->do_debug = dc->base.singlestep_enabled; |
6450 | } |
6451 | |
/* translator_loop hook: nothing to emit at the start of a TB.  */
static void s390x_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
}
6455 | |
/* translator_loop hook: record pc and the current cc op at each insn
   start; restore_state_to_opc reads these back as data[0]/data[1].  */
static void s390x_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, dc->cc_op);
}
6462 | |
/* translator_loop hook: stop translation at a guest breakpoint and
   arrange for a debug exception to be raised at TB end.  */
static bool s390x_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                      const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    dc->base.is_jmp = DISAS_PC_STALE;
    dc->do_debug = true;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size does the right thing. */
    dc->base.pc_next += 2;
    return true;
}
6477 | |
6478 | static void s390x_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) |
6479 | { |
6480 | CPUS390XState *env = cs->env_ptr; |
6481 | DisasContext *dc = container_of(dcbase, DisasContext, base); |
6482 | |
6483 | dc->base.is_jmp = translate_one(env, dc); |
6484 | if (dc->base.is_jmp == DISAS_NEXT) { |
6485 | uint64_t page_start; |
6486 | |
6487 | page_start = dc->base.pc_first & TARGET_PAGE_MASK; |
6488 | if (dc->base.pc_next - page_start >= TARGET_PAGE_SIZE || dc->ex_value) { |
6489 | dc->base.is_jmp = DISAS_TOO_MANY; |
6490 | } |
6491 | } |
6492 | } |
6493 | |
/* translator_loop hook: emit the end-of-TB code appropriate for the way
   translation stopped, syncing psw.addr and cc_op back to env as needed.  */
static void s390x_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_GOTO_TB:
    case DISAS_NORETURN:
        /* The TB exit was already emitted during translation.  */
        break;
    case DISAS_TOO_MANY:
    case DISAS_PC_STALE:
    case DISAS_PC_STALE_NOCHAIN:
        update_psw_addr(dc);
        /* FALLTHRU */
    case DISAS_PC_UPDATED:
        /* Next TB starts off with CC_OP_DYNAMIC, so make sure the
           cc op type is in env */
        update_cc_op(dc);
        /* FALLTHRU */
    case DISAS_PC_CC_UPDATED:
        /* Exit the TB, either by raising a debug exception or by return. */
        if (dc->do_debug) {
            gen_exception(EXCP_DEBUG);
        } else if (use_exit_tb(dc) ||
                   dc->base.is_jmp == DISAS_PC_STALE_NOCHAIN) {
            tcg_gen_exit_tb(NULL, 0);
        } else {
            tcg_gen_lookup_and_goto_ptr();
        }
        break;
    default:
        g_assert_not_reached();
    }
}
6527 | |
6528 | static void s390x_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs) |
6529 | { |
6530 | DisasContext *dc = container_of(dcbase, DisasContext, base); |
6531 | |
6532 | if (unlikely(dc->ex_value)) { |
6533 | /* ??? Unfortunately log_target_disas can't use host memory. */ |
6534 | qemu_log("IN: EXECUTE %016" PRIx64, dc->ex_value); |
6535 | } else { |
6536 | qemu_log("IN: %s\n" , lookup_symbol(dc->base.pc_first)); |
6537 | log_target_disas(cs, dc->base.pc_first, dc->base.tb->size); |
6538 | } |
6539 | } |
6540 | |
/* Hook table consumed by the generic translator_loop.  */
static const TranslatorOps s390x_tr_ops = {
    .init_disas_context = s390x_tr_init_disas_context,
    .tb_start = s390x_tr_tb_start,
    .insn_start = s390x_tr_insn_start,
    .breakpoint_check = s390x_tr_breakpoint_check,
    .translate_insn = s390x_tr_translate_insn,
    .tb_stop = s390x_tr_tb_stop,
    .disas_log = s390x_tr_disas_log,
};
6550 | |
/* Entry point for TB translation: run the generic translator loop with
   the s390x hooks over at most max_insns guest instructions.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext dc;

    translator_loop(&s390x_tr_ops, &dc.base, cs, tb, max_insns);
}
6557 | |
6558 | void restore_state_to_opc(CPUS390XState *env, TranslationBlock *tb, |
6559 | target_ulong *data) |
6560 | { |
6561 | int cc_op = data[1]; |
6562 | env->psw.addr = data[0]; |
6563 | if ((cc_op != CC_OP_DYNAMIC) && (cc_op != CC_OP_STATIC)) { |
6564 | env->cc_op = cc_op; |
6565 | } |
6566 | } |
6567 | |