1 | /* |
2 | * OpenRISC translation |
3 | * |
4 | * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com> |
5 | * Feng Gao <gf91597@gmail.com> |
6 | * |
7 | * This library is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU Lesser General Public |
9 | * License as published by the Free Software Foundation; either |
10 | * version 2.1 of the License, or (at your option) any later version. |
11 | * |
12 | * This library is distributed in the hope that it will be useful, |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
15 | * Lesser General Public License for more details. |
16 | * |
17 | * You should have received a copy of the GNU Lesser General Public |
18 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
19 | */ |
20 | |
21 | #include "qemu/osdep.h" |
22 | #include "cpu.h" |
23 | #include "exec/exec-all.h" |
24 | #include "disas/disas.h" |
25 | #include "tcg-op.h" |
26 | #include "qemu/log.h" |
27 | #include "qemu/bitops.h" |
28 | #include "qemu/qemu-print.h" |
29 | #include "exec/cpu_ldst.h" |
30 | #include "exec/translator.h" |
31 | |
32 | #include "exec/helper-proto.h" |
33 | #include "exec/helper-gen.h" |
34 | #include "exec/gen-icount.h" |
35 | |
36 | #include "trace-tcg.h" |
37 | #include "exec/log.h" |
38 | |
39 | /* is_jmp field values */ |
40 | #define DISAS_EXIT DISAS_TARGET_0 /* force exit to main loop */ |
41 | #define DISAS_JUMP DISAS_TARGET_1 /* exit via jmp_pc/jmp_pc_imm */ |
42 | |
/* Per-translation-block decoder state. */
typedef struct DisasContext {
    DisasContextBase base;
    uint32_t mem_idx;           /* MMU index used for all loads/stores */
    uint32_t tb_flags;          /* SR/TB flag bits (TB_FLAGS_SM, SR_OVE) at TB start */
    uint32_t delayed_branch;    /* set to 2 when a branch insn is decoded
                                   (counted down elsewhere; delay slot follows) */
    uint32_t cpucfgr;           /* CPUCFGR copy, for feature checks */
    uint32_t avr;               /* architecture version register copy */

    /* If not -1, jmp_pc contains this value and so is a direct jump. */
    target_ulong jmp_pc_imm;

    /* The temporary corresponding to register 0 for this compilation. */
    TCGv R0;
} DisasContext;
57 | |
58 | static inline bool is_user(DisasContext *dc) |
59 | { |
60 | #ifdef CONFIG_USER_ONLY |
61 | return true; |
62 | #else |
63 | return !(dc->tb_flags & TB_FLAGS_SM); |
64 | #endif |
65 | } |
66 | |
67 | /* Include the auto-generated decoder. */ |
68 | #include "decode.inc.c" |
69 | |
static TCGv cpu_sr;                 /* supervision register */
static TCGv cpu_regs[32];           /* GPRs, backed by shadow_gpr[0] */
static TCGv cpu_pc;
static TCGv jmp_pc;            /* l.jr/l.jalr temp pc */
static TCGv cpu_ppc;                /* previous pc */
static TCGv cpu_sr_f;           /* bf/bnf, F flag taken */
static TCGv cpu_sr_cy;          /* carry (unsigned overflow) */
static TCGv cpu_sr_ov;          /* signed overflow */
static TCGv cpu_lock_addr;          /* l.lwa reservation address */
static TCGv cpu_lock_value;         /* l.lwa reservation value */
static TCGv_i32 fpcsr;
static TCGv_i64 cpu_mac;        /* MACHI:MACLO */
static TCGv_i32 cpu_dflag;          /* delay-slot flag */
83 | |
/* Create the TCG global views of the CPU state used by the translator. */
void openrisc_translate_init(void)
{
    static const char * const regnames[] = {
        "r0" , "r1" , "r2" , "r3" , "r4" , "r5" , "r6" , "r7" ,
        "r8" , "r9" , "r10" , "r11" , "r12" , "r13" , "r14" , "r15" ,
        "r16" , "r17" , "r18" , "r19" , "r20" , "r21" , "r22" , "r23" ,
        "r24" , "r25" , "r26" , "r27" , "r28" , "r29" , "r30" , "r31" ,
    };
    int i;

    cpu_sr = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, sr), "sr" );
    cpu_dflag = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUOpenRISCState, dflag),
                                       "dflag" );
    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, pc), "pc" );
    cpu_ppc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUOpenRISCState, ppc), "ppc" );
    jmp_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc" );
    cpu_sr_f = tcg_global_mem_new(cpu_env,
                                  offsetof(CPUOpenRISCState, sr_f), "sr_f" );
    cpu_sr_cy = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_cy), "sr_cy" );
    cpu_sr_ov = tcg_global_mem_new(cpu_env,
                                   offsetof(CPUOpenRISCState, sr_ov), "sr_ov" );
    cpu_lock_addr = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUOpenRISCState, lock_addr),
                                       "lock_addr" );
    cpu_lock_value = tcg_global_mem_new(cpu_env,
                                        offsetof(CPUOpenRISCState, lock_value),
                                        "lock_value" );
    fpcsr = tcg_global_mem_new_i32(cpu_env,
                                   offsetof(CPUOpenRISCState, fpcsr),
                                   "fpcsr" );
    cpu_mac = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUOpenRISCState, mac),
                                     "mac" );
    /* The register globals view shadow set 0 only. */
    for (i = 0; i < 32; i++) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUOpenRISCState,
                                                  shadow_gpr[0][i]),
                                         regnames[i]);
    }
}
130 | |
131 | static void gen_exception(DisasContext *dc, unsigned int excp) |
132 | { |
133 | TCGv_i32 tmp = tcg_const_i32(excp); |
134 | gen_helper_exception(cpu_env, tmp); |
135 | tcg_temp_free_i32(tmp); |
136 | } |
137 | |
/* Raise an illegal-instruction exception at the current insn and end the TB. */
static void gen_illegal_exception(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_ILLEGAL);
    dc->base.is_jmp = DISAS_NORETURN;
}
144 | |
145 | static bool check_v1_3(DisasContext *dc) |
146 | { |
147 | return dc->avr >= 0x01030000; |
148 | } |
149 | |
150 | static bool check_of32s(DisasContext *dc) |
151 | { |
152 | return dc->cpucfgr & CPUCFGR_OF32S; |
153 | } |
154 | |
155 | static bool check_of64a32s(DisasContext *dc) |
156 | { |
157 | return dc->cpucfgr & CPUCFGR_OF64A32S; |
158 | } |
159 | |
160 | static TCGv cpu_R(DisasContext *dc, int reg) |
161 | { |
162 | if (reg == 0) { |
163 | return dc->R0; |
164 | } else { |
165 | return cpu_regs[reg]; |
166 | } |
167 | } |
168 | |
169 | /* |
170 | * We're about to write to REG. On the off-chance that the user is |
171 | * writing to R0, re-instate the architectural register. |
172 | */ |
173 | static void check_r0_write(DisasContext *dc, int reg) |
174 | { |
175 | if (unlikely(reg == 0)) { |
176 | dc->R0 = cpu_regs[0]; |
177 | } |
178 | } |
179 | |
180 | static void gen_ove_cy(DisasContext *dc) |
181 | { |
182 | if (dc->tb_flags & SR_OVE) { |
183 | gen_helper_ove_cy(cpu_env); |
184 | } |
185 | } |
186 | |
187 | static void gen_ove_ov(DisasContext *dc) |
188 | { |
189 | if (dc->tb_flags & SR_OVE) { |
190 | gen_helper_ove_ov(cpu_env); |
191 | } |
192 | } |
193 | |
194 | static void gen_ove_cyov(DisasContext *dc) |
195 | { |
196 | if (dc->tb_flags & SR_OVE) { |
197 | gen_helper_ove_cyov(cpu_env); |
198 | } |
199 | } |
200 | |
/* dest = srca + srcb, computing CY (unsigned carry) and OV (signed overflow). */
static void gen_add(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    /* Double-word add: CY receives the carry-out of the low word. */
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, srcb, t0);
    /* Signed overflow in the sign bit of OV: inputs have the same sign
       and the result's sign differs: (res ^ srcb) & ~(srca ^ srcb). */
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    /* Write dest last so it may alias either input. */
    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
217 | |
/* dest = srca + srcb + CY, computing new CY and OV.  Used by l.addc/l.addic. */
static void gen_addc(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_const_tl(0);
    TCGv res = tcg_temp_new();

    /* Two-step add so the incoming carry is folded in: first srca + CY,
       then + srcb, accumulating the carry-out each time. */
    tcg_gen_add2_tl(res, cpu_sr_cy, srca, t0, cpu_sr_cy, t0);
    tcg_gen_add2_tl(res, cpu_sr_cy, res, cpu_sr_cy, srcb, t0);
    /* Signed overflow as in gen_add: (res ^ srcb) & ~(srca ^ srcb);
       the carry-in does not change this formula. */
    tcg_gen_xor_tl(cpu_sr_ov, srca, srcb);
    tcg_gen_xor_tl(t0, res, srcb);
    tcg_gen_andc_tl(cpu_sr_ov, t0, cpu_sr_ov);
    tcg_temp_free(t0);

    /* Write dest last so it may alias either input. */
    tcg_gen_mov_tl(dest, res);
    tcg_temp_free(res);

    gen_ove_cyov(dc);
}
235 | |
236 | static void gen_sub(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) |
237 | { |
238 | TCGv res = tcg_temp_new(); |
239 | |
240 | tcg_gen_sub_tl(res, srca, srcb); |
241 | tcg_gen_xor_tl(cpu_sr_cy, srca, srcb); |
242 | tcg_gen_xor_tl(cpu_sr_ov, res, srcb); |
243 | tcg_gen_and_tl(cpu_sr_ov, cpu_sr_ov, cpu_sr_cy); |
244 | tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_cy, srca, srcb); |
245 | |
246 | tcg_gen_mov_tl(dest, res); |
247 | tcg_temp_free(res); |
248 | |
249 | gen_ove_cyov(dc); |
250 | } |
251 | |
/* dest = srca * srcb (signed), computing OV.  Used by l.mul/l.muli. */
static void gen_mul(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    /* Signed widening multiply; the high half lands in OV temporarily. */
    tcg_gen_muls2_tl(dest, cpu_sr_ov, srca, srcb);
    /* Overflow iff the high half is not the sign-extension of the low half. */
    tcg_gen_sari_tl(t0, dest, TARGET_LONG_BITS - 1);
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_ov, cpu_sr_ov, t0);
    tcg_temp_free(t0);

    /* Turn the 0/1 setcond result into 0/-1 so the sign bit carries OV. */
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
264 | |
265 | static void gen_mulu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb) |
266 | { |
267 | tcg_gen_muls2_tl(dest, cpu_sr_cy, srca, srcb); |
268 | tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_cy, cpu_sr_cy, 0); |
269 | |
270 | gen_ove_cy(dc); |
271 | } |
272 | |
/* dest = srca / srcb (signed); OV flags division by zero. */
static void gen_div(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_ov, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1. */
    tcg_gen_or_tl(t0, srcb, cpu_sr_ov);
    tcg_gen_div_tl(dest, srca, t0);
    tcg_temp_free(t0);

    /* Turn the 0/1 setcond result into 0/-1 so the sign bit carries OV. */
    tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);
    gen_ove_ov(dc);
}
287 | |
/* dest = srca / srcb (unsigned); CY flags division by zero. */
static void gen_divu(DisasContext *dc, TCGv dest, TCGv srca, TCGv srcb)
{
    TCGv t0 = tcg_temp_new();

    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_cy, srcb, 0);
    /* The result of divide-by-zero is undefined.
       Suppress the host-side exception by dividing by 1. */
    tcg_gen_or_tl(t0, srcb, cpu_sr_cy);
    tcg_gen_divu_tl(dest, srca, t0);
    tcg_temp_free(t0);

    gen_ove_cy(dc);
}
301 | |
/* mac = srca * srcb, signed 64-bit product, computing OV.  Used by l.muld. */
static void gen_muld(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        /* 32x32 -> 64 signed product can never overflow 64 bits. */
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_ov, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        /* Overflow iff the 128-bit high half is not the sign-extension
           of the low half. */
        tcg_gen_muls2_i64(cpu_mac, high, t1, t2);
        tcg_gen_sari_i64(t1, cpu_mac, 63);
        tcg_gen_setcond_i64(TCG_COND_NE, t1, t1, high);
        tcg_temp_free_i64(high);
        tcg_gen_trunc_i64_tl(cpu_sr_ov, t1);
        tcg_gen_neg_tl(cpu_sr_ov, cpu_sr_ov);

        gen_ove_ov(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
327 | |
/* mac = srca * srcb, unsigned 64-bit product, computing CY.  Used by l.muldu. */
static void gen_muldu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    if (TARGET_LONG_BITS == 32) {
        /* 32x32 -> 64 unsigned product can never overflow 64 bits. */
        tcg_gen_mul_i64(cpu_mac, t1, t2);
        tcg_gen_movi_tl(cpu_sr_cy, 0);
    } else {
        TCGv_i64 high = tcg_temp_new_i64();

        /* Carry iff the 128-bit high half of the product is non-zero. */
        tcg_gen_mulu2_i64(cpu_mac, high, t1, t2);
        tcg_gen_setcondi_i64(TCG_COND_NE, high, high, 0);
        tcg_gen_trunc_i64_tl(cpu_sr_cy, high);
        tcg_temp_free_i64(high);

        gen_ove_cy(dc);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
351 | |
/* mac += srca * srcb (signed), computing OV on the 64-bit accumulate. */
static void gen_mac(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_tl_i64(t1, srca);
    tcg_gen_ext_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during addition stage. */
    /* With A = old mac, B = product, R = A + B:
       overflow = (B ^ R) & ~(A ^ B), i.e. same input signs and the
       result's sign differs; the sign bit of t1 carries the flag. */
    tcg_gen_xor_i64(t2, cpu_mac, t1);
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_xor_i64(t1, t1, cpu_mac);
    tcg_gen_andc_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

#if TARGET_LONG_BITS == 32
    /* Reduce the 64-bit flag to 32 bits by taking the high (sign) half. */
    tcg_gen_extrh_i64_i32(cpu_sr_ov, t1);
#else
    tcg_gen_mov_i64(cpu_sr_ov, t1);
#endif
    tcg_temp_free_i64(t1);

    gen_ove_ov(dc);
}
377 | |
/* mac += srca * srcb (unsigned), computing CY on the 64-bit accumulate. */
static void gen_macu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);
    tcg_temp_free_i64(t2);

    /* Note that overflow is only computed during addition stage. */
    /* Unsigned carry-out iff the sum wrapped: new mac < addend. */
    tcg_gen_add_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_setcond_i64(TCG_COND_LTU, t1, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t1);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
396 | |
397 | static void gen_msb(DisasContext *dc, TCGv srca, TCGv srcb) |
398 | { |
399 | TCGv_i64 t1 = tcg_temp_new_i64(); |
400 | TCGv_i64 t2 = tcg_temp_new_i64(); |
401 | |
402 | tcg_gen_ext_tl_i64(t1, srca); |
403 | tcg_gen_ext_tl_i64(t2, srcb); |
404 | tcg_gen_mul_i64(t1, t1, t2); |
405 | |
406 | /* Note that overflow is only computed during subtraction stage. */ |
407 | tcg_gen_xor_i64(t2, cpu_mac, t1); |
408 | tcg_gen_sub_i64(cpu_mac, cpu_mac, t1); |
409 | tcg_gen_xor_i64(t1, t1, cpu_mac); |
410 | tcg_gen_and_i64(t1, t1, t2); |
411 | tcg_temp_free_i64(t2); |
412 | |
413 | #if TARGET_LONG_BITS == 32 |
414 | tcg_gen_extrh_i64_i32(cpu_sr_ov, t1); |
415 | #else |
416 | tcg_gen_mov_i64(cpu_sr_ov, t1); |
417 | #endif |
418 | tcg_temp_free_i64(t1); |
419 | |
420 | gen_ove_ov(dc); |
421 | } |
422 | |
/* mac -= srca * srcb (unsigned), computing CY on the 64-bit subtract. */
static void gen_msbu(DisasContext *dc, TCGv srca, TCGv srcb)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_tl_i64(t1, srca);
    tcg_gen_extu_tl_i64(t2, srcb);
    tcg_gen_mul_i64(t1, t1, t2);

    /* Note that overflow is only computed during subtraction stage. */
    /* Unsigned borrow: old mac < product — must be tested before the
       subtraction overwrites the accumulator. */
    tcg_gen_setcond_i64(TCG_COND_LTU, t2, cpu_mac, t1);
    tcg_gen_sub_i64(cpu_mac, cpu_mac, t1);
    tcg_gen_trunc_i64_tl(cpu_sr_cy, t2);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);

    gen_ove_cy(dc);
}
441 | |
442 | static bool trans_l_add(DisasContext *dc, arg_dab *a) |
443 | { |
444 | check_r0_write(dc, a->d); |
445 | gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
446 | return true; |
447 | } |
448 | |
449 | static bool trans_l_addc(DisasContext *dc, arg_dab *a) |
450 | { |
451 | check_r0_write(dc, a->d); |
452 | gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
453 | return true; |
454 | } |
455 | |
456 | static bool trans_l_sub(DisasContext *dc, arg_dab *a) |
457 | { |
458 | check_r0_write(dc, a->d); |
459 | gen_sub(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
460 | return true; |
461 | } |
462 | |
463 | static bool trans_l_and(DisasContext *dc, arg_dab *a) |
464 | { |
465 | check_r0_write(dc, a->d); |
466 | tcg_gen_and_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
467 | return true; |
468 | } |
469 | |
470 | static bool trans_l_or(DisasContext *dc, arg_dab *a) |
471 | { |
472 | check_r0_write(dc, a->d); |
473 | tcg_gen_or_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
474 | return true; |
475 | } |
476 | |
477 | static bool trans_l_xor(DisasContext *dc, arg_dab *a) |
478 | { |
479 | check_r0_write(dc, a->d); |
480 | tcg_gen_xor_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
481 | return true; |
482 | } |
483 | |
484 | static bool trans_l_sll(DisasContext *dc, arg_dab *a) |
485 | { |
486 | check_r0_write(dc, a->d); |
487 | tcg_gen_shl_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
488 | return true; |
489 | } |
490 | |
491 | static bool trans_l_srl(DisasContext *dc, arg_dab *a) |
492 | { |
493 | check_r0_write(dc, a->d); |
494 | tcg_gen_shr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
495 | return true; |
496 | } |
497 | |
498 | static bool trans_l_sra(DisasContext *dc, arg_dab *a) |
499 | { |
500 | check_r0_write(dc, a->d); |
501 | tcg_gen_sar_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
502 | return true; |
503 | } |
504 | |
505 | static bool trans_l_ror(DisasContext *dc, arg_dab *a) |
506 | { |
507 | check_r0_write(dc, a->d); |
508 | tcg_gen_rotr_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
509 | return true; |
510 | } |
511 | |
512 | static bool trans_l_exths(DisasContext *dc, arg_da *a) |
513 | { |
514 | check_r0_write(dc, a->d); |
515 | tcg_gen_ext16s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); |
516 | return true; |
517 | } |
518 | |
519 | static bool trans_l_extbs(DisasContext *dc, arg_da *a) |
520 | { |
521 | check_r0_write(dc, a->d); |
522 | tcg_gen_ext8s_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); |
523 | return true; |
524 | } |
525 | |
526 | static bool trans_l_exthz(DisasContext *dc, arg_da *a) |
527 | { |
528 | check_r0_write(dc, a->d); |
529 | tcg_gen_ext16u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); |
530 | return true; |
531 | } |
532 | |
533 | static bool trans_l_extbz(DisasContext *dc, arg_da *a) |
534 | { |
535 | check_r0_write(dc, a->d); |
536 | tcg_gen_ext8u_tl(cpu_R(dc, a->d), cpu_R(dc, a->a)); |
537 | return true; |
538 | } |
539 | |
540 | static bool trans_l_cmov(DisasContext *dc, arg_dab *a) |
541 | { |
542 | TCGv zero; |
543 | |
544 | check_r0_write(dc, a->d); |
545 | zero = tcg_const_tl(0); |
546 | tcg_gen_movcond_tl(TCG_COND_NE, cpu_R(dc, a->d), cpu_sr_f, zero, |
547 | cpu_R(dc, a->a), cpu_R(dc, a->b)); |
548 | tcg_temp_free(zero); |
549 | return true; |
550 | } |
551 | |
552 | static bool trans_l_ff1(DisasContext *dc, arg_da *a) |
553 | { |
554 | check_r0_write(dc, a->d); |
555 | tcg_gen_ctzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), -1); |
556 | tcg_gen_addi_tl(cpu_R(dc, a->d), cpu_R(dc, a->d), 1); |
557 | return true; |
558 | } |
559 | |
560 | static bool trans_l_fl1(DisasContext *dc, arg_da *a) |
561 | { |
562 | check_r0_write(dc, a->d); |
563 | tcg_gen_clzi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), TARGET_LONG_BITS); |
564 | tcg_gen_subfi_tl(cpu_R(dc, a->d), TARGET_LONG_BITS, cpu_R(dc, a->d)); |
565 | return true; |
566 | } |
567 | |
568 | static bool trans_l_mul(DisasContext *dc, arg_dab *a) |
569 | { |
570 | check_r0_write(dc, a->d); |
571 | gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
572 | return true; |
573 | } |
574 | |
575 | static bool trans_l_mulu(DisasContext *dc, arg_dab *a) |
576 | { |
577 | check_r0_write(dc, a->d); |
578 | gen_mulu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
579 | return true; |
580 | } |
581 | |
582 | static bool trans_l_div(DisasContext *dc, arg_dab *a) |
583 | { |
584 | check_r0_write(dc, a->d); |
585 | gen_div(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
586 | return true; |
587 | } |
588 | |
589 | static bool trans_l_divu(DisasContext *dc, arg_dab *a) |
590 | { |
591 | check_r0_write(dc, a->d); |
592 | gen_divu(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), cpu_R(dc, a->b)); |
593 | return true; |
594 | } |
595 | |
/* l.muld: mac = rA * rB, signed 64-bit product, setting OV. */
static bool trans_l_muld(DisasContext *dc, arg_ab *a)
{
    gen_muld(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
601 | |
/* l.muldu: mac = rA * rB, unsigned 64-bit product, setting CY. */
static bool trans_l_muldu(DisasContext *dc, arg_ab *a)
{
    gen_muldu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
607 | |
608 | static bool trans_l_j(DisasContext *dc, arg_l_j *a) |
609 | { |
610 | target_ulong tmp_pc = dc->base.pc_next + a->n * 4; |
611 | |
612 | tcg_gen_movi_tl(jmp_pc, tmp_pc); |
613 | dc->jmp_pc_imm = tmp_pc; |
614 | dc->delayed_branch = 2; |
615 | return true; |
616 | } |
617 | |
/* l.jal: jump and link; r9 = return address (after the delay slot). */
static bool trans_l_jal(DisasContext *dc, arg_l_jal *a)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    target_ulong ret_pc = dc->base.pc_next + 8;

    tcg_gen_movi_tl(cpu_regs[9], ret_pc);
    /* Optimize jal being used to load the PC for PIC. */
    if (tmp_pc != ret_pc) {
        tcg_gen_movi_tl(jmp_pc, tmp_pc);
        dc->jmp_pc_imm = tmp_pc;
        dc->delayed_branch = 2;
    }
    return true;
}
632 | |
/*
 * Common helper for l.bf/l.bnf: conditional pc-relative branch with one
 * delay slot.  jmp_pc receives the branch target if (SR[F] COND 0) holds,
 * otherwise the fall-through address after the delay slot (pc + 8).
 */
static void do_bf(DisasContext *dc, arg_l_bf *a, TCGCond cond)
{
    target_ulong tmp_pc = dc->base.pc_next + a->n * 4;
    TCGv t_next = tcg_const_tl(dc->base.pc_next + 8);
    TCGv t_true = tcg_const_tl(tmp_pc);
    TCGv t_zero = tcg_const_tl(0);

    tcg_gen_movcond_tl(cond, jmp_pc, cpu_sr_f, t_zero, t_true, t_next);

    tcg_temp_free(t_next);
    tcg_temp_free(t_true);
    tcg_temp_free(t_zero);
    dc->delayed_branch = 2;
}
647 | |
/* l.bf: branch if SR[F] is set. */
static bool trans_l_bf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_NE);
    return true;
}
653 | |
/* l.bnf: branch if SR[F] is clear. */
static bool trans_l_bnf(DisasContext *dc, arg_l_bf *a)
{
    do_bf(dc, a, TCG_COND_EQ);
    return true;
}
659 | |
660 | static bool trans_l_jr(DisasContext *dc, arg_l_jr *a) |
661 | { |
662 | tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b)); |
663 | dc->delayed_branch = 2; |
664 | return true; |
665 | } |
666 | |
/* l.jalr: indirect jump and link; r9 = return address after the delay slot.
   jmp_pc must be read from rB *before* r9 is clobbered, since rB may be r9. */
static bool trans_l_jalr(DisasContext *dc, arg_l_jalr *a)
{
    tcg_gen_mov_tl(jmp_pc, cpu_R(dc, a->b));
    tcg_gen_movi_tl(cpu_regs[9], dc->base.pc_next + 8);
    dc->delayed_branch = 2;
    return true;
}
674 | |
/* l.lwa: load-linked word; records address and value for a later l.swa. */
static bool trans_l_lwa(DisasContext *dc, arg_load *a)
{
    TCGv ea;

    check_r0_write(dc, a->d);
    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);
    tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, MO_TEUL);
    /* Remember the reservation for the matching store-conditional. */
    tcg_gen_mov_tl(cpu_lock_addr, ea);
    tcg_gen_mov_tl(cpu_lock_value, cpu_R(dc, a->d));
    tcg_temp_free(ea);
    return true;
}
688 | |
689 | static void do_load(DisasContext *dc, arg_load *a, MemOp mop) |
690 | { |
691 | TCGv ea; |
692 | |
693 | check_r0_write(dc, a->d); |
694 | ea = tcg_temp_new(); |
695 | tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i); |
696 | tcg_gen_qemu_ld_tl(cpu_R(dc, a->d), ea, dc->mem_idx, mop); |
697 | tcg_temp_free(ea); |
698 | } |
699 | |
/* l.lwz: load word, zero-extended. */
static bool trans_l_lwz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUL);
    return true;
}
705 | |
/* l.lws: load word, sign-extended. */
static bool trans_l_lws(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESL);
    return true;
}
711 | |
/* l.lbz: load byte, zero-extended. */
static bool trans_l_lbz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_UB);
    return true;
}
717 | |
/* l.lbs: load byte, sign-extended. */
static bool trans_l_lbs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_SB);
    return true;
}
723 | |
/* l.lhz: load halfword, zero-extended. */
static bool trans_l_lhz(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TEUW);
    return true;
}
729 | |
/* l.lhs: load halfword, sign-extended. */
static bool trans_l_lhs(DisasContext *dc, arg_load *a)
{
    do_load(dc, a, MO_TESW);
    return true;
}
735 | |
/* l.swa: store-conditional word.  SR[F] = 1 on success, 0 on failure.
   The reservation (cpu_lock_addr/cpu_lock_value) set by l.lwa is always
   cleared afterwards, success or not. */
static bool trans_l_swa(DisasContext *dc, arg_store *a)
{
    TCGv ea, val;
    TCGLabel *lab_fail, *lab_done;

    ea = tcg_temp_new();
    tcg_gen_addi_tl(ea, cpu_R(dc, a->a), a->i);

    /* For TB_FLAGS_R0_0, the branch below invalidates the temporary assigned
       to cpu_regs[0]. Since l.swa is quite often immediately followed by a
       branch, don't bother reallocating; finish the TB using the "real" R0.
       This also takes care of RB input across the branch. */
    dc->R0 = cpu_regs[0];

    lab_fail = gen_new_label();
    lab_done = gen_new_label();
    /* Fail fast if the address does not match the reservation. */
    tcg_gen_brcond_tl(TCG_COND_NE, ea, cpu_lock_addr, lab_fail);
    tcg_temp_free(ea);

    /* Atomically store rB iff memory still holds the loaded value;
       VAL receives the value found, so F = (val == lock_value). */
    val = tcg_temp_new();
    tcg_gen_atomic_cmpxchg_tl(val, cpu_lock_addr, cpu_lock_value,
                              cpu_regs[a->b], dc->mem_idx, MO_TEUL);
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f, val, cpu_lock_value);
    tcg_temp_free(val);

    tcg_gen_br(lab_done);

    gen_set_label(lab_fail);
    tcg_gen_movi_tl(cpu_sr_f, 0);

    gen_set_label(lab_done);
    /* Drop the reservation in every case. */
    tcg_gen_movi_tl(cpu_lock_addr, -1);
    return true;
}
770 | |
771 | static void do_store(DisasContext *dc, arg_store *a, MemOp mop) |
772 | { |
773 | TCGv t0 = tcg_temp_new(); |
774 | tcg_gen_addi_tl(t0, cpu_R(dc, a->a), a->i); |
775 | tcg_gen_qemu_st_tl(cpu_R(dc, a->b), t0, dc->mem_idx, mop); |
776 | tcg_temp_free(t0); |
777 | } |
778 | |
/* l.sw: store word. */
static bool trans_l_sw(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUL);
    return true;
}
784 | |
/* l.sb: store byte. */
static bool trans_l_sb(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_UB);
    return true;
}
790 | |
/* l.sh: store halfword. */
static bool trans_l_sh(DisasContext *dc, arg_store *a)
{
    do_store(dc, a, MO_TEUW);
    return true;
}
796 | |
/* l.nop: no operation. */
static bool trans_l_nop(DisasContext *dc, arg_l_nop *a)
{
    return true;
}
801 | |
/* l.adrp: rD = page base of pc plus (i << page bits); requires arch >= v1.3. */
static bool trans_l_adrp(DisasContext *dc, arg_l_adrp *a)
{
    if (!check_v1_3(dc)) {
        /* Not implemented on this core: decode as illegal. */
        return false;
    }
    check_r0_write(dc, a->d);

    tcg_gen_movi_i32(cpu_R(dc, a->d),
                     (dc->base.pc_next & TARGET_PAGE_MASK) +
                     ((target_long)a->i << TARGET_PAGE_BITS));
    return true;
}
814 | |
815 | static bool trans_l_addi(DisasContext *dc, arg_rri *a) |
816 | { |
817 | TCGv t0; |
818 | |
819 | check_r0_write(dc, a->d); |
820 | t0 = tcg_const_tl(a->i); |
821 | gen_add(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), t0); |
822 | tcg_temp_free(t0); |
823 | return true; |
824 | } |
825 | |
826 | static bool trans_l_addic(DisasContext *dc, arg_rri *a) |
827 | { |
828 | TCGv t0; |
829 | |
830 | check_r0_write(dc, a->d); |
831 | t0 = tcg_const_tl(a->i); |
832 | gen_addc(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), t0); |
833 | tcg_temp_free(t0); |
834 | return true; |
835 | } |
836 | |
837 | static bool trans_l_muli(DisasContext *dc, arg_rri *a) |
838 | { |
839 | TCGv t0; |
840 | |
841 | check_r0_write(dc, a->d); |
842 | t0 = tcg_const_tl(a->i); |
843 | gen_mul(dc, cpu_R(dc, a->d), cpu_R(dc, a->a), t0); |
844 | tcg_temp_free(t0); |
845 | return true; |
846 | } |
847 | |
848 | static bool trans_l_maci(DisasContext *dc, arg_l_maci *a) |
849 | { |
850 | TCGv t0; |
851 | |
852 | t0 = tcg_const_tl(a->i); |
853 | gen_mac(dc, cpu_R(dc, a->a), t0); |
854 | tcg_temp_free(t0); |
855 | return true; |
856 | } |
857 | |
858 | static bool trans_l_andi(DisasContext *dc, arg_rrk *a) |
859 | { |
860 | check_r0_write(dc, a->d); |
861 | tcg_gen_andi_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k); |
862 | return true; |
863 | } |
864 | |
865 | static bool trans_l_ori(DisasContext *dc, arg_rrk *a) |
866 | { |
867 | check_r0_write(dc, a->d); |
868 | tcg_gen_ori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->k); |
869 | return true; |
870 | } |
871 | |
872 | static bool trans_l_xori(DisasContext *dc, arg_rri *a) |
873 | { |
874 | check_r0_write(dc, a->d); |
875 | tcg_gen_xori_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), a->i); |
876 | return true; |
877 | } |
878 | |
/* l.mfspr: rD = SPR[rA | k]; privileged, illegal in user mode. */
static bool trans_l_mfspr(DisasContext *dc, arg_l_mfspr *a)
{
    check_r0_write(dc, a->d);

    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
        /* The old rD value is passed to the helper — presumably returned
           unchanged for unimplemented SPRs; confirm against helper_mfspr. */
        gen_helper_mfspr(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d), spr);
        tcg_temp_free(spr);
    }
    return true;
}
893 | |
/* l.mtspr: SPR[rA | k] = rB; privileged, illegal in user mode. */
static bool trans_l_mtspr(DisasContext *dc, arg_l_mtspr *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        TCGv spr;

        /* For SR, we will need to exit the TB to recognize the new
         * exception state. For NPC, in theory this counts as a branch
         * (although the SPR only exists for use by an ICE). Save all
         * of the cpu state first, allowing it to be overwritten.
         */
        if (dc->delayed_branch) {
            /* In a delay slot: resume at the pending branch target. */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
        } else {
            tcg_gen_movi_tl(cpu_pc, dc->base.pc_next + 4);
        }
        dc->base.is_jmp = DISAS_EXIT;

        spr = tcg_temp_new();
        tcg_gen_ori_tl(spr, cpu_R(dc, a->a), a->k);
        gen_helper_mtspr(cpu_env, spr, cpu_R(dc, a->b));
        tcg_temp_free(spr);
    }
    return true;
}
921 | |
/* l.mac: mac += rA * rB (signed), setting OV. */
static bool trans_l_mac(DisasContext *dc, arg_ab *a)
{
    gen_mac(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
927 | |
/* l.msb: mac -= rA * rB (signed), setting OV. */
static bool trans_l_msb(DisasContext *dc, arg_ab *a)
{
    gen_msb(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
933 | |
/* l.macu: mac += rA * rB (unsigned), setting CY. */
static bool trans_l_macu(DisasContext *dc, arg_ab *a)
{
    gen_macu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
939 | |
/* l.msbu: mac -= rA * rB (unsigned), setting CY. */
static bool trans_l_msbu(DisasContext *dc, arg_ab *a)
{
    gen_msbu(dc, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}
945 | |
946 | static bool trans_l_slli(DisasContext *dc, arg_dal *a) |
947 | { |
948 | check_r0_write(dc, a->d); |
949 | tcg_gen_shli_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), |
950 | a->l & (TARGET_LONG_BITS - 1)); |
951 | return true; |
952 | } |
953 | |
954 | static bool trans_l_srli(DisasContext *dc, arg_dal *a) |
955 | { |
956 | check_r0_write(dc, a->d); |
957 | tcg_gen_shri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), |
958 | a->l & (TARGET_LONG_BITS - 1)); |
959 | return true; |
960 | } |
961 | |
962 | static bool trans_l_srai(DisasContext *dc, arg_dal *a) |
963 | { |
964 | check_r0_write(dc, a->d); |
965 | tcg_gen_sari_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), |
966 | a->l & (TARGET_LONG_BITS - 1)); |
967 | return true; |
968 | } |
969 | |
970 | static bool trans_l_rori(DisasContext *dc, arg_dal *a) |
971 | { |
972 | check_r0_write(dc, a->d); |
973 | tcg_gen_rotri_tl(cpu_R(dc, a->d), cpu_R(dc, a->a), |
974 | a->l & (TARGET_LONG_BITS - 1)); |
975 | return true; |
976 | } |
977 | |
978 | static bool trans_l_movhi(DisasContext *dc, arg_l_movhi *a) |
979 | { |
980 | check_r0_write(dc, a->d); |
981 | tcg_gen_movi_tl(cpu_R(dc, a->d), a->k << 16); |
982 | return true; |
983 | } |
984 | |
/* l.macrc: rD = low word of the MAC accumulator, then clear the accumulator. */
static bool trans_l_macrc(DisasContext *dc, arg_l_macrc *a)
{
    check_r0_write(dc, a->d);
    tcg_gen_trunc_i64_tl(cpu_R(dc, a->d), cpu_mac);
    tcg_gen_movi_i64(cpu_mac, 0);
    return true;
}
992 | |
/* l.sf<cond>: set the SR[F] flag from a register/register comparison.
   The condition code maps directly onto the TCG comparison.  */
static bool trans_l_sfeq(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_EQ, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfne(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_NE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgtu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GTU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgeu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GEU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfltu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LTU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfleu(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LEU, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfgts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GT, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfges(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_GE, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sflts(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_sr_f,
                       cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

static bool trans_l_sfles(DisasContext *dc, arg_ab *a)
{
    tcg_gen_setcond_tl(TCG_COND_LE,
                       cpu_sr_f, cpu_R(dc, a->a), cpu_R(dc, a->b));
    return true;
}

/* l.sf<cond>i: set SR[F] from a register/immediate comparison.  */
static bool trans_l_sfeqi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_EQ, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfnei(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_NE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgtui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgeui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfltui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfleui(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LEU, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgtsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GT, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfgesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_GE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sfltsi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LT, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}

static bool trans_l_sflesi(DisasContext *dc, arg_ai *a)
{
    tcg_gen_setcondi_tl(TCG_COND_LE, cpu_sr_f, cpu_R(dc, a->a), a->i);
    return true;
}
1122 | |
/* l.sys: raise a syscall exception at the current instruction.  */
static bool trans_l_sys(DisasContext *dc, arg_l_sys *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_SYSCALL);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}

/* l.trap: raise a trap exception at the current instruction.  */
static bool trans_l_trap(DisasContext *dc, arg_l_trap *a)
{
    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_TRAP);
    dc->base.is_jmp = DISAS_NORETURN;
    return true;
}
1138 | |
/* l.msync: memory barrier; emit a full TCG memory fence.  */
static bool trans_l_msync(DisasContext *dc, arg_l_msync *a)
{
    tcg_gen_mb(TCG_MO_ALL);
    return true;
}

/* l.psync: pipeline synchronization -- no-op under TCG emulation.  */
static bool trans_l_psync(DisasContext *dc, arg_l_psync *a)
{
    return true;
}

/* l.csync: context synchronization -- no-op under TCG emulation.  */
static bool trans_l_csync(DisasContext *dc, arg_l_csync *a)
{
    return true;
}
1154 | |
/* l.rfe: return from exception.  Privileged; user mode raises an
   illegal-instruction exception.  The helper restores machine state,
   so we must exit to the main loop afterwards.  */
static bool trans_l_rfe(DisasContext *dc, arg_l_rfe *a)
{
    if (is_user(dc)) {
        gen_illegal_exception(dc);
    } else {
        gen_helper_rfe(cpu_env);
        dc->base.is_jmp = DISAS_EXIT;
    }
    return true;
}
1165 | |
/* Common codegen for a unary single-precision FP operation: rD = fn(rA).
   Fails decode if single-precision FP is not available (check_of32s).  */
static bool do_fp2(DisasContext *dc, arg_da *a,
                   void (*fn)(TCGv, TCGv_env, TCGv))
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a));
    gen_helper_update_fpcsr(cpu_env);
    return true;
}

/* Common codegen for a binary single-precision FP operation:
   rD = fn(rA, rB), followed by an FPCSR update.  */
static bool do_fp3(DisasContext *dc, arg_dab *a,
                   void (*fn)(TCGv, TCGv_env, TCGv, TCGv))
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    fn(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
    gen_helper_update_fpcsr(cpu_env);
    return true;
}

/* Common codegen for a single-precision FP comparison into SR[F].
   'swap' exchanges the operands (e.g. GT implemented as swapped LT);
   'inv' inverts the result (e.g. NE implemented as inverted EQ).  */
static bool do_fpcmp(DisasContext *dc, arg_ab *a,
                     void (*fn)(TCGv, TCGv_env, TCGv, TCGv),
                     bool inv, bool swap)
{
    if (!check_of32s(dc)) {
        return false;
    }
    if (swap) {
        fn(cpu_sr_f, cpu_env, cpu_R(dc, a->b), cpu_R(dc, a->a));
    } else {
        fn(cpu_sr_f, cpu_env, cpu_R(dc, a->a), cpu_R(dc, a->b));
    }
    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
1208 | |
/* Single-precision arithmetic: thin wrappers around do_fp3.  */
static bool trans_lf_add_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_add_s);
}

static bool trans_lf_sub_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_sub_s);
}

static bool trans_lf_mul_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_mul_s);
}

static bool trans_lf_div_s(DisasContext *dc, arg_dab *a)
{
    return do_fp3(dc, a, gen_helper_float_div_s);
}
1228 | |
1229 | static bool trans_lf_rem_s(DisasContext *dc, arg_dab *a) |
1230 | { |
1231 | return do_fp3(dc, a, gen_helper_float_rem_s); |
1232 | return true; |
1233 | } |
1234 | |
/* Single-precision int<->float conversions via do_fp2.  */
static bool trans_lf_itof_s(DisasContext *dc, arg_da *a)
{
    return do_fp2(dc, a, gen_helper_itofs);
}

static bool trans_lf_ftoi_s(DisasContext *dc, arg_da *a)
{
    return do_fp2(dc, a, gen_helper_ftois);
}
1244 | |
/* lf.madd.s: single-precision multiply-accumulate, rD += rA * rB.
   rD is both source and destination, so this cannot use do_fp3.  */
static bool trans_lf_madd_s(DisasContext *dc, arg_dab *a)
{
    if (!check_of32s(dc)) {
        return false;
    }
    check_r0_write(dc, a->d);
    gen_helper_float_madd_s(cpu_R(dc, a->d), cpu_env, cpu_R(dc, a->d),
                            cpu_R(dc, a->a), cpu_R(dc, a->b));
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
1256 | |
/* Single-precision ordered comparisons.  Only eq/lt/le helpers exist;
   ne is inverted eq, gt/ge are lt/le with the operands swapped.  */
static bool trans_lf_sfeq_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_eq_s, false, false);
}

static bool trans_lf_sfne_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_eq_s, true, false);
}

static bool trans_lf_sfgt_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_lt_s, false, true);
}

static bool trans_lf_sfge_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_le_s, false, true);
}

static bool trans_lf_sflt_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_lt_s, false, false);
}

static bool trans_lf_sfle_s(DisasContext *dc, arg_ab *a)
{
    return do_fpcmp(dc, a, gen_helper_float_le_s, false, false);
}
1286 | |
/* Single-precision unordered comparisons; these require architecture
   version 1.3 or later (check_v1_3).  */
static bool trans_lf_sfueq_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ueq_s, false, false);
}

static bool trans_lf_sfult_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ult_s, false, false);
}

static bool trans_lf_sfugt_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ult_s, false, true);
}

static bool trans_lf_sfule_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ule_s, false, false);
}

static bool trans_lf_sfuge_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_ule_s, false, true);
}

static bool trans_lf_sfun_s(DisasContext *dc, arg_ab *a)
{
    if (!check_v1_3(dc)) {
        return false;
    }
    return do_fpcmp(dc, a, gen_helper_float_un_s, false, false);
}
1334 | |
1335 | static bool check_pair(DisasContext *dc, int r, int p) |
1336 | { |
1337 | return r + 1 + p < 32; |
1338 | } |
1339 | |
/* Assemble a 64-bit value from the register pair: r is the high word,
   r+1+p the low word.  */
static void load_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
{
    tcg_gen_concat_i32_i64(t, cpu_R(dc, r + 1 + p), cpu_R(dc, r));
}

/* Split a 64-bit value back into the register pair (inverse of
   load_pair).  */
static void save_pair(DisasContext *dc, TCGv_i64 t, int r, int p)
{
    tcg_gen_extr_i64_i32(cpu_R(dc, r + 1 + p), cpu_R(dc, r), t);
}
1349 | |
/* Common codegen for a binary double-precision FP operation on
   register pairs: (rD pair) = fn(rA pair, rB pair).  Fails decode if
   double-on-32-bit FP is unavailable or any pair is out of range.  */
static bool do_dp3(DisasContext *dc, arg_dab_pair *a,
                   void (*fn)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0, t1;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    load_pair(dc, t1, a->b, a->bp);
    fn(t0, cpu_env, t0, t1);
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

/* Common codegen for a unary double-precision FP operation on
   register pairs: (rD pair) = fn(rA pair).  */
static bool do_dp2(DisasContext *dc, arg_da_pair *a,
                   void (*fn)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    fn(t0, cpu_env, t0);
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

/* Common codegen for a double-precision comparison of register pairs
   into SR[F].  'swap' and 'inv' behave as in do_fpcmp.  */
static bool do_dpcmp(DisasContext *dc, arg_ab_pair *a,
                     void (*fn)(TCGv, TCGv_env, TCGv_i64, TCGv_i64),
                     bool inv, bool swap)
{
    TCGv_i64 t0, t1;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp)) {
        return false;
    }

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    load_pair(dc, t1, a->b, a->bp);
    if (swap) {
        fn(cpu_sr_f, cpu_env, t1, t0);
    } else {
        fn(cpu_sr_f, cpu_env, t0, t1);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);

    if (inv) {
        tcg_gen_xori_tl(cpu_sr_f, cpu_sr_f, 1);
    }
    gen_helper_update_fpcsr(cpu_env);
    return true;
}
1428 | |
/* Double-precision arithmetic and conversions: thin wrappers around
   do_dp3 / do_dp2.  */
static bool trans_lf_add_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_add_d);
}

static bool trans_lf_sub_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_sub_d);
}

static bool trans_lf_mul_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_mul_d);
}

static bool trans_lf_div_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_div_d);
}

static bool trans_lf_rem_d(DisasContext *dc, arg_dab_pair *a)
{
    return do_dp3(dc, a, gen_helper_float_rem_d);
}

static bool trans_lf_itof_d(DisasContext *dc, arg_da_pair *a)
{
    return do_dp2(dc, a, gen_helper_itofd);
}

static bool trans_lf_ftoi_d(DisasContext *dc, arg_da_pair *a)
{
    return do_dp2(dc, a, gen_helper_ftoid);
}
1463 | |
/* lf.stod.d: widen single-precision rA to a double in the rD pair.  */
static bool trans_lf_stod_d(DisasContext *dc, arg_lf_stod_d *a)
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    gen_helper_stod(t0, cpu_env, cpu_R(dc, a->a));
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}

/* lf.dtos.d: narrow the double in the rA pair to single-precision rD.  */
static bool trans_lf_dtos_d(DisasContext *dc, arg_lf_dtos_d *a)
{
    TCGv_i64 t0;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    load_pair(dc, t0, a->a, a->ap);
    gen_helper_dtos(cpu_R(dc, a->d), cpu_env, t0);
    tcg_temp_free_i64(t0);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}
1501 | |
/* lf.madd.d: double-precision multiply-accumulate on register pairs,
   (rD pair) += (rA pair) * (rB pair).  rD is both source and
   destination, so this cannot use do_dp3.  */
static bool trans_lf_madd_d(DisasContext *dc, arg_dab_pair *a)
{
    TCGv_i64 t0, t1, t2;

    if (!check_of64a32s(dc) ||
        !check_pair(dc, a->a, a->ap) ||
        !check_pair(dc, a->b, a->bp) ||
        !check_pair(dc, a->d, a->dp)) {
        return false;
    }
    check_r0_write(dc, a->d);

    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    load_pair(dc, t0, a->d, a->dp);
    load_pair(dc, t1, a->a, a->ap);
    load_pair(dc, t2, a->b, a->bp);
    gen_helper_float_madd_d(t0, cpu_env, t0, t1, t2);
    save_pair(dc, t0, a->d, a->dp);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);

    gen_helper_update_fpcsr(cpu_env);
    return true;
}
1529 | |
/* Double-precision comparisons: ne is inverted eq; gt/ge/ugt/uge are
   the lt/le/ult/ule helpers with the operands swapped.  */
static bool trans_lf_sfeq_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_eq_d, false, false);
}

static bool trans_lf_sfne_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_eq_d, true, false);
}

static bool trans_lf_sfgt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_lt_d, false, true);
}

static bool trans_lf_sfge_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_le_d, false, true);
}

static bool trans_lf_sflt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_lt_d, false, false);
}

static bool trans_lf_sfle_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_le_d, false, false);
}

static bool trans_lf_sfueq_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ueq_d, false, false);
}

static bool trans_lf_sfule_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ule_d, false, false);
}

static bool trans_lf_sfuge_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ule_d, false, true);
}

static bool trans_lf_sfult_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ult_d, false, false);
}

static bool trans_lf_sfugt_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_ult_d, false, true);
}

static bool trans_lf_sfun_d(DisasContext *dc, arg_ab_pair *a)
{
    return do_dpcmp(dc, a, gen_helper_float_un_d, false, false);
}
1589 | |
/* Translator hook: capture per-TB state (MMU index, TB flags, delay-slot
   flag, CPU feature words) and bound the TB to the current page.  */
static void openrisc_tr_init_disas_context(DisasContextBase *dcb, CPUState *cs)
{
    DisasContext *dc = container_of(dcb, DisasContext, base);
    CPUOpenRISCState *env = cs->env_ptr;
    int bound;

    dc->mem_idx = cpu_mmu_index(env, false);
    dc->tb_flags = dc->base.tb->flags;
    dc->delayed_branch = (dc->tb_flags & TB_FLAGS_DFLAG) != 0;
    dc->cpucfgr = env->cpucfgr;
    dc->avr = env->avr;
    /* -1 marks "no direct jump target yet"; see DISAS_JUMP handling.  */
    dc->jmp_pc_imm = -1;

    /* Number of 4-byte insns remaining on the current page.  */
    bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
    dc->base.max_insns = MIN(dc->base.max_insns, bound);
}
1606 | |
/* Translator hook: choose the TCG value used for R0 in this TB.  */
static void openrisc_tr_tb_start(DisasContextBase *db, CPUState *cs)
{
    DisasContext *dc = container_of(db, DisasContext, base);

    /* Allow the TCG optimizer to see that R0 == 0,
       when it's true, which is the common case. */
    if (dc->tb_flags & TB_FLAGS_R0_0) {
        dc->R0 = tcg_const_tl(0);
    } else {
        dc->R0 = cpu_regs[0];
    }
}
1619 | |
/* Translator hook: record pc plus packed flags for unwinding --
   bit 0 = in a delay slot, bit 1 = not the first insn of the TB
   (consumed by restore_state_to_opc).  */
static void openrisc_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next, (dc->delayed_branch ? 1 : 0)
                       | (dc->base.num_insns > 1 ? 2 : 0));
}
1627 | |
/* Translator hook: stop translation at a guest breakpoint and raise
   EXCP_DEBUG.  */
static bool openrisc_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cs,
                                         const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_movi_tl(cpu_pc, dc->base.pc_next);
    gen_exception(dc, EXCP_DEBUG);
    dc->base.is_jmp = DISAS_NORETURN;
    /* The address covered by the breakpoint must be included in
       [tb->pc, tb->pc + tb->size) in order to for it to be
       properly cleared -- thus we increment the PC here so that
       the logic setting tb->size below does the right thing. */
    dc->base.pc_next += 4;
    return true;
}
1643 | |
/* Translator hook: fetch, decode and translate one 4-byte instruction,
   then account for an expiring delay slot.  */
static void openrisc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    uint32_t insn = cpu_ldl_code(&cpu->env, dc->base.pc_next);

    if (!decode(dc, insn)) {
        gen_illegal_exception(dc);
    }
    dc->base.pc_next += 4;

    /* When exiting the delay slot normally, exit via jmp_pc.
     * For DISAS_NORETURN, we have raised an exception and already exited.
     * For DISAS_EXIT, we found l.rfe in a delay slot.  There's nothing
     * in the manual saying this is illegal, but surely it should be.
     * At least or1ksim overrides pcnext and ignores the branch.
     */
    if (dc->delayed_branch
        && --dc->delayed_branch == 0
        && dc->base.is_jmp == DISAS_NEXT) {
        dc->base.is_jmp = DISAS_JUMP;
    }
}
1667 | |
/* Translator hook: finish the TB, synchronizing dflag/ppc and emitting
   the appropriate exit sequence for each is_jmp disposition.  */
static void openrisc_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong jmp_dest;

    /* If we have already exited the TB, nothing following has effect. */
    if (dc->base.is_jmp == DISAS_NORETURN) {
        return;
    }

    /* Adjust the delayed branch state for the next TB. */
    if ((dc->tb_flags & TB_FLAGS_DFLAG ? 1 : 0) != (dc->delayed_branch != 0)) {
        tcg_gen_movi_i32(cpu_dflag, dc->delayed_branch != 0);
    }

    /* For DISAS_TOO_MANY, jump to the next insn. */
    jmp_dest = dc->base.pc_next;
    /* PPC always points at the last insn of this TB.  */
    tcg_gen_movi_tl(cpu_ppc, jmp_dest - 4);

    switch (dc->base.is_jmp) {
    case DISAS_JUMP:
        jmp_dest = dc->jmp_pc_imm;
        if (jmp_dest == -1) {
            /* The jump destination is indirect/computed; use jmp_pc. */
            tcg_gen_mov_tl(cpu_pc, jmp_pc);
            tcg_gen_discard_tl(jmp_pc);
            if (unlikely(dc->base.singlestep_enabled)) {
                gen_exception(dc, EXCP_DEBUG);
            } else {
                tcg_gen_lookup_and_goto_ptr();
            }
            break;
        }
        /* The jump destination is direct; use jmp_pc_imm.
           However, we will have stored into jmp_pc as well;
           we know now that it wasn't needed. */
        tcg_gen_discard_tl(jmp_pc);
        /* fallthru */

    case DISAS_TOO_MANY:
        if (unlikely(dc->base.singlestep_enabled)) {
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            gen_exception(dc, EXCP_DEBUG);
        } else if ((dc->base.pc_first ^ jmp_dest) & TARGET_PAGE_MASK) {
            /* Cross-page jump: cannot chain directly, go via lookup.  */
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_lookup_and_goto_ptr();
        } else {
            /* Same-page jump: chain directly to the next TB.  */
            tcg_gen_goto_tb(0);
            tcg_gen_movi_tl(cpu_pc, jmp_dest);
            tcg_gen_exit_tb(dc->base.tb, 0);
        }
        break;

    case DISAS_EXIT:
        if (unlikely(dc->base.singlestep_enabled)) {
            gen_exception(dc, EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
1732 | |
/* Translator hook: log a disassembly of the guest code in this TB.  */
static void openrisc_tr_disas_log(const DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *s = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n" , lookup_symbol(s->base.pc_first));
    log_target_disas(cs, s->base.pc_first, s->base.tb->size);
}

/* Hook table consumed by the generic translator loop.  */
static const TranslatorOps openrisc_tr_ops = {
    .init_disas_context = openrisc_tr_init_disas_context,
    .tb_start           = openrisc_tr_tb_start,
    .insn_start         = openrisc_tr_insn_start,
    .breakpoint_check   = openrisc_tr_breakpoint_check,
    .translate_insn     = openrisc_tr_translate_insn,
    .tb_stop            = openrisc_tr_tb_stop,
    .disas_log          = openrisc_tr_disas_log,
};

/* Entry point: translate one TB via the generic translator loop.  */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    DisasContext ctx;

    translator_loop(&openrisc_tr_ops, &ctx.base, cs, tb, max_insns);
}
1757 | |
/* Dump PC and all 32 GPRs, four registers per line.  */
void openrisc_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    OpenRISCCPU *cpu = OPENRISC_CPU(cs);
    CPUOpenRISCState *env = &cpu->env;
    int i;

    qemu_fprintf(f, "PC=%08x\n" , env->pc);
    for (i = 0; i < 32; ++i) {
        qemu_fprintf(f, "R%02d=%08x%c" , i, cpu_get_gpr(env, i),
                     (i % 4) == 3 ? '\n' : ' ');
    }
}
1770 | |
/* Rebuild CPU state from the insn_start data recorded by
   openrisc_tr_insn_start: data[0] is the pc, data[1] bit 0 is the
   delay-slot flag, bit 1 says this was not the first insn of the TB
   (in which case ppc is the preceding insn).  */
void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
    env->dflag = data[1] & 1;
    if (data[1] & 2) {
        env->ppc = env->pc - 4;
    }
}
1780 | |