/*
 * LatticeMico32 main translation routines.
 *
 * Copyright (c) 2010 Michael Walle <michael@walle.cc>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/translator.h"
#include "tcg-op.h"
#include "qemu/qemu-print.h"

#include "exec/cpu_ldst.h"
#include "hw/lm32/lm32_pic.h"

#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"


#define DISAS_LM32 0

#define LOG_DIS(...) \
    do { \
        if (DISAS_LM32) { \
            qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__); \
        } \
    } while (0)

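/* Extract the inclusive bit range [start, end] from a 32-bit value. */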
#define EXTRACT_FIELD(src, start, end) \
            (((src) >> start) & ((1 << (end - start + 1)) - 1))

#define MEM_INDEX 0

/* is_jmp field values */
#define DISAS_JUMP    DISAS_TARGET_0 /* only pc was modified dynamically */
#define DISAS_UPDATE  DISAS_TARGET_1 /* cpu state was modified dynamically */
#define DISAS_TB_JUMP DISAS_TARGET_2 /* only pc was modified statically */

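/* TCG globals that shadow the architectural state held in CPULM32State. */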
static TCGv cpu_R[32];
static TCGv cpu_pc;
static TCGv cpu_ie;
static TCGv cpu_icc;
static TCGv cpu_dcc;
static TCGv cpu_cc;
static TCGv cpu_cfg;
static TCGv cpu_eba;
static TCGv cpu_dc;
static TCGv cpu_deba;
static TCGv cpu_bp[4];
static TCGv cpu_wp[4];

#include "exec/gen-icount.h"

enum {
    OP_FMT_RI,
    OP_FMT_RR,
    OP_FMT_CR,
    OP_FMT_I
};

/* This is the state at translation time. */
typedef struct DisasContext {
    target_ulong pc;

    /* Decoder. */
    int format;
    uint32_t ir;
    uint8_t opcode;
    uint8_t r0, r1, r2, csr;
    uint16_t imm5;
    uint16_t imm16;
    uint32_t imm26;

    unsigned int delayed_branch;
    unsigned int tb_flags, synced_flags; /* tb dependent flags. */
    int is_jmp;

    struct TranslationBlock *tb;
    int singlestep_enabled;

    uint32_t features;
    uint8_t num_breakpoints;
    uint8_t num_watchpoints;
} DisasContext;

static const char *regnames[] = {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26/gp", "r27/fp", "r28/sp", "r29/ra",
    "r30/ea", "r31/ba", "bp0", "bp1", "bp2", "bp3", "wp0",
    "wp1", "wp2", "wp3"
};

static inline int zero_extend(unsigned int val, int width)
{
    return val & ((1 << width) - 1);
}

static inline int sign_extend(unsigned int val, int width)
{
    int sval;

    /* LSL. */
    val <<= 32 - width;
    sval = val;
    /* ASR. */
    sval >>= 32 - width;

    return sval;
}

static inline void t_gen_raise_exception(DisasContext *dc, uint32_t index)
{
    TCGv_i32 tmp = tcg_const_i32(index);

    gen_helper_raise_exception(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
}

static inline void t_gen_illegal_insn(DisasContext *dc)
{
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    gen_helper_ill(cpu_env);
}

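/*
 * Direct block chaining with goto_tb is only safe when the destination is
 * on the same guest page as the current TB and we are not single-stepping;
 * otherwise exit to the main loop so the new PC is looked up there.
 */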
static inline bool use_goto_tb(DisasContext *dc, target_ulong dest)
{
    if (unlikely(dc->singlestep_enabled)) {
        return false;
    }

#ifndef CONFIG_USER_ONLY
    return (dc->tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
{
    if (use_goto_tb(dc, dest)) {
        tcg_gen_goto_tb(n);
        tcg_gen_movi_tl(cpu_pc, dest);
        tcg_gen_exit_tb(dc->tb, n);
    } else {
        tcg_gen_movi_tl(cpu_pc, dest);
        if (dc->singlestep_enabled) {
            t_gen_raise_exception(dc, EXCP_DEBUG);
        }
        tcg_gen_exit_tb(NULL, 0);
    }
}

static void dec_add(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        if (dc->r0 == R_R0) {
            if (dc->r1 == R_R0 && dc->imm16 == 0) {
                LOG_DIS("nop\n");
            } else {
                LOG_DIS("mvi r%d, %d\n", dc->r1, sign_extend(dc->imm16, 16));
            }
        } else {
            LOG_DIS("addi r%d, r%d, %d\n", dc->r1, dc->r0,
                    sign_extend(dc->imm16, 16));
        }
    } else {
        LOG_DIS("add r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_addi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_add_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_and(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("andi r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("and r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
    } else {
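        /* QEMU treats "and r0, r0, r0" as a hint to halt the virtual CPU. */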
        if (dc->r0 == 0 && dc->r1 == 0 && dc->r2 == 0) {
            tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
            gen_helper_hlt(cpu_env);
        } else {
            tcg_gen_and_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
        }
    }
}

static void dec_andhi(DisasContext *dc)
{
    LOG_DIS("andhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);

    tcg_gen_andi_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

static void dec_b(DisasContext *dc)
{
    if (dc->r0 == R_RA) {
        LOG_DIS("ret\n");
    } else if (dc->r0 == R_EA) {
        LOG_DIS("eret\n");
    } else if (dc->r0 == R_BA) {
        LOG_DIS("bret\n");
    } else {
        LOG_DIS("b r%d\n", dc->r0);
    }

    /* Restore IE.IE from IE.EIE on an eret, or from IE.BIE on a bret. */
    if (dc->r0 == R_EA) {
        TCGv t0 = tcg_temp_new();
        TCGLabel *l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_EIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_EIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    } else if (dc->r0 == R_BA) {
        TCGv t0 = tcg_temp_new();
        TCGLabel *l1 = gen_new_label();
        tcg_gen_andi_tl(t0, cpu_ie, IE_BIE);
        tcg_gen_ori_tl(cpu_ie, cpu_ie, IE_IE);
        tcg_gen_brcondi_tl(TCG_COND_EQ, t0, IE_BIE, l1);
        tcg_gen_andi_tl(cpu_ie, cpu_ie, ~IE_IE);
        gen_set_label(l1);
        tcg_temp_free(t0);
    }
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_bi(DisasContext *dc)
{
    LOG_DIS("bi %d\n", sign_extend(dc->imm26 << 2, 26));

    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

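/*
 * Emit a conditional branch: when the condition holds, jump to
 * pc + offset (imm16 scaled to bytes); otherwise fall through to the
 * next instruction.  Both exits use direct TB chaining where possible.
 */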
static inline void gen_cond_branch(DisasContext *dc, int cond)
{
    TCGLabel *l1 = gen_new_label();
    tcg_gen_brcond_tl(cond, cpu_R[dc->r0], cpu_R[dc->r1], l1);
    gen_goto_tb(dc, 0, dc->pc + 4);
    gen_set_label(l1);
    gen_goto_tb(dc, 1, dc->pc + (sign_extend(dc->imm16 << 2, 16)));
    dc->is_jmp = DISAS_TB_JUMP;
}

static void dec_be(DisasContext *dc)
{
    LOG_DIS("be r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_EQ);
}

static void dec_bg(DisasContext *dc)
{
    LOG_DIS("bg r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GT);
}

static void dec_bge(DisasContext *dc)
{
    LOG_DIS("bge r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GE);
}

static void dec_bgeu(DisasContext *dc)
{
    LOG_DIS("bgeu r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GEU);
}

static void dec_bgu(DisasContext *dc)
{
    LOG_DIS("bgu r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_GTU);
}

static void dec_bne(DisasContext *dc)
{
    LOG_DIS("bne r%d, r%d, %d\n", dc->r1, dc->r0,
            sign_extend(dc->imm16, 16) * 4);

    gen_cond_branch(dc, TCG_COND_NE);
}

static void dec_call(DisasContext *dc)
{
    LOG_DIS("call r%d\n", dc->r0);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    tcg_gen_mov_tl(cpu_pc, cpu_R[dc->r0]);

    dc->is_jmp = DISAS_JUMP;
}

static void dec_calli(DisasContext *dc)
{
    LOG_DIS("calli %d\n", sign_extend(dc->imm26, 26) * 4);

    tcg_gen_movi_tl(cpu_R[R_RA], dc->pc + 4);
    gen_goto_tb(dc, 0, dc->pc + (sign_extend(dc->imm26 << 2, 26)));

    dc->is_jmp = DISAS_TB_JUMP;
}

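/* For the immediate compare forms the operand is zero-extended for the
 * unsigned conditions (GEU/GTU) and sign-extended for all others. */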
static inline void gen_compare(DisasContext *dc, int cond)
{
    int i;

    if (dc->format == OP_FMT_RI) {
        switch (cond) {
        case TCG_COND_GEU:
        case TCG_COND_GTU:
            i = zero_extend(dc->imm16, 16);
            break;
        default:
            i = sign_extend(dc->imm16, 16);
            break;
        }

        tcg_gen_setcondi_tl(cond, cpu_R[dc->r1], cpu_R[dc->r0], i);
    } else {
        tcg_gen_setcond_tl(cond, cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_cmpe(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpe r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_EQ);
}

static void dec_cmpg(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgi r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpg r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GT);
}

static void dec_cmpge(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpge r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GE);
}

static void dec_cmpgeu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgeui r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgeu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GEU);
}

static void dec_cmpgu(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpgui r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpgu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_GTU);
}

static void dec_cmpne(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("cmpnei r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("cmpne r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    gen_compare(dc, TCG_COND_NE);
}

static void dec_divu(DisasContext *dc)
{
    TCGLabel *l1;

    LOG_DIS("divu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->features & LM32_FEATURE_DIVIDE)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_divu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_lb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lb r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lbu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lbu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld8u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lh r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lhu(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lhu r%d, (r%d+%d)\n", dc->r1, dc->r0, dc->imm16);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld16u(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_lw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("lw r%d, (r%d+%d)\n", dc->r1, dc->r0, sign_extend(dc->imm16, 16));

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_ld32s(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_modu(DisasContext *dc)
{
    TCGLabel *l1;

    LOG_DIS("modu r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    if (!(dc->features & LM32_FEATURE_DIVIDE)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware divider is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    l1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[dc->r1], 0, l1);
    tcg_gen_movi_tl(cpu_pc, dc->pc);
    t_gen_raise_exception(dc, EXCP_DIVIDE_BY_ZERO);
    gen_set_label(l1);
    tcg_gen_remu_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_mul(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("muli r%d, r%d, %d\n", dc->r1, dc->r0,
                sign_extend(dc->imm16, 16));
    } else {
        LOG_DIS("mul r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_MULTIPLY)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "hardware multiplier is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_muli_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        sign_extend(dc->imm16, 16));
    } else {
        tcg_gen_mul_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_nor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("nori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("nor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_movi_tl(t0, zero_extend(dc->imm16, 16));
        tcg_gen_nor_tl(cpu_R[dc->r1], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    } else {
        tcg_gen_nor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_or(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("ori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("mv r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("or r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                       zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_or_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_orhi(DisasContext *dc)
{
    if (dc->r0 == R_R0) {
        LOG_DIS("mvhi r%d, %d\n", dc->r1, dc->imm16);
    } else {
        LOG_DIS("orhi r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm16);
    }

    tcg_gen_ori_tl(cpu_R[dc->r1], cpu_R[dc->r0], (dc->imm16 << 16));
}

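/* break and scall share one opcode; imm5 selects the trap: 2 raises a
 * breakpoint exception, 7 a system call exception, anything else is an
 * illegal instruction. */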
static void dec_scall(DisasContext *dc)
{
    switch (dc->imm5) {
    case 2:
        LOG_DIS("break\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_BREAKPOINT);
        break;
    case 7:
        LOG_DIS("scall\n");
        tcg_gen_movi_tl(cpu_pc, dc->pc);
        t_gen_raise_exception(dc, EXCP_SYSTEMCALL);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode @0x%x\n", dc->pc);
        t_gen_illegal_insn(dc);
        break;
    }
}

static void dec_rcsr(DisasContext *dc)
{
    LOG_DIS("rcsr r%d, %d\n", dc->r2, dc->csr);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_ie);
        break;
    case CSR_IM:
        gen_helper_rcsr_im(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_IP:
        gen_helper_rcsr_ip(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_CC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cc);
        break;
    case CSR_CFG:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_cfg);
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_eba);
        break;
    case CSR_DC:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_dc);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_R[dc->r2], cpu_deba);
        break;
    case CSR_JTX:
        gen_helper_rcsr_jtx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_JRX:
        gen_helper_rcsr_jrx(cpu_R[dc->r2], cpu_env);
        break;
    case CSR_ICC:
    case CSR_DCC:
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid read access csr=%x\n", dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "read_csr: unknown csr=%x\n", dc->csr);
        break;
    }
}

static void dec_sb(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sb (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st8(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sextb(DisasContext *dc)
{
    LOG_DIS("sextb r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext8s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sexth(DisasContext *dc)
{
    LOG_DIS("sexth r%d, r%d\n", dc->r2, dc->r0);

    if (!(dc->features & LM32_FEATURE_SIGN_EXTEND)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "hardware sign extender is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    tcg_gen_ext16s_tl(cpu_R[dc->r2], cpu_R[dc->r0]);
}

static void dec_sh(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sh (r%d+%d), r%d\n", dc->r0, dc->imm16, dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st16(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_sl(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sli r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sl r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (!(dc->features & LM32_FEATURE_SHIFT)) {
        qemu_log_mask(LOG_GUEST_ERROR, "hardware shifter is not available\n");
        t_gen_illegal_insn(dc);
        return;
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_shli_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);
        tcg_gen_shl_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        tcg_temp_free(t0);
    }
}

static void dec_sr(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("sri r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sr r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    /* The real CPU (w/o hardware shifter) only supports right shift by
     * exactly one bit. */
    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_sari_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
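        /* Without the optional barrel shifter only a shift amount of
         * exactly one is accepted, so the register operand has to be
         * checked at run time. */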
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_sar_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}

static void dec_sru(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("srui r%d, r%d, %d\n", dc->r1, dc->r0, dc->imm5);
    } else {
        LOG_DIS("sru r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        if (!(dc->features & LM32_FEATURE_SHIFT) && (dc->imm5 != 1)) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "hardware shifter is not available\n");
            t_gen_illegal_insn(dc);
            return;
        }
        tcg_gen_shri_tl(cpu_R[dc->r1], cpu_R[dc->r0], dc->imm5);
    } else {
        TCGLabel *l1 = gen_new_label();
        TCGLabel *l2 = gen_new_label();
        TCGv t0 = tcg_temp_local_new();
        tcg_gen_andi_tl(t0, cpu_R[dc->r1], 0x1f);

        if (!(dc->features & LM32_FEATURE_SHIFT)) {
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 1, l1);
            t_gen_illegal_insn(dc);
            tcg_gen_br(l2);
        }

        gen_set_label(l1);
        tcg_gen_shr_tl(cpu_R[dc->r2], cpu_R[dc->r0], t0);
        gen_set_label(l2);

        tcg_temp_free(t0);
    }
}

static void dec_sub(DisasContext *dc)
{
    LOG_DIS("sub r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);

    tcg_gen_sub_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
}

static void dec_sw(DisasContext *dc)
{
    TCGv t0;

    LOG_DIS("sw (r%d+%d), r%d\n", dc->r0, sign_extend(dc->imm16, 16), dc->r1);

    t0 = tcg_temp_new();
    tcg_gen_addi_tl(t0, cpu_R[dc->r0], sign_extend(dc->imm16, 16));
    tcg_gen_qemu_st32(cpu_R[dc->r1], t0, MEM_INDEX);
    tcg_temp_free(t0);
}

static void dec_user(DisasContext *dc)
{
    LOG_DIS("user\n");

    qemu_log_mask(LOG_GUEST_ERROR, "user instruction undefined\n");
    t_gen_illegal_insn(dc);
}

static void dec_wcsr(DisasContext *dc)
{
    int no;

    LOG_DIS("wcsr %d, r%d\n", dc->csr, dc->r1);

    switch (dc->csr) {
    case CSR_IE:
        tcg_gen_mov_tl(cpu_ie, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IM:
        /* mark as an io operation because it could cause an interrupt */
        if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_im(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_IP:
        /* mark as an io operation because it could cause an interrupt */
        if (tb_cflags(dc->tb) & CF_USE_ICOUNT) {
            gen_io_start();
        }
        gen_helper_wcsr_ip(cpu_env, cpu_R[dc->r1]);
        tcg_gen_movi_tl(cpu_pc, dc->pc + 4);
        dc->is_jmp = DISAS_UPDATE;
        break;
    case CSR_ICC:
        /* TODO */
        break;
    case CSR_DCC:
        /* TODO */
        break;
    case CSR_EBA:
        tcg_gen_mov_tl(cpu_eba, cpu_R[dc->r1]);
        break;
    case CSR_DEBA:
        tcg_gen_mov_tl(cpu_deba, cpu_R[dc->r1]);
        break;
    case CSR_JTX:
        gen_helper_wcsr_jtx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_JRX:
        gen_helper_wcsr_jrx(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_DC:
        gen_helper_wcsr_dc(cpu_env, cpu_R[dc->r1]);
        break;
    case CSR_BP0:
    case CSR_BP1:
    case CSR_BP2:
    case CSR_BP3:
        no = dc->csr - CSR_BP0;
        if (dc->num_breakpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "breakpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_bp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_WP0:
    case CSR_WP1:
    case CSR_WP2:
    case CSR_WP3:
        no = dc->csr - CSR_WP0;
        if (dc->num_watchpoints <= no) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "watchpoint #%i is not available\n", no);
            t_gen_illegal_insn(dc);
            break;
        }
        gen_helper_wcsr_wp(cpu_env, cpu_R[dc->r1], tcg_const_i32(no));
        break;
    case CSR_CC:
    case CSR_CFG:
        qemu_log_mask(LOG_GUEST_ERROR, "invalid write access csr=%x\n",
                      dc->csr);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR, "write_csr: unknown csr=%x\n",
                      dc->csr);
        break;
    }
}

static void dec_xnor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xnori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        if (dc->r1 == R_R0) {
            LOG_DIS("not r%d, r%d\n", dc->r2, dc->r0);
        } else {
            LOG_DIS("xnor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
        }
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
        tcg_gen_not_tl(cpu_R[dc->r1], cpu_R[dc->r1]);
    } else {
        tcg_gen_eqv_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_xor(DisasContext *dc)
{
    if (dc->format == OP_FMT_RI) {
        LOG_DIS("xori r%d, r%d, %d\n", dc->r1, dc->r0,
                zero_extend(dc->imm16, 16));
    } else {
        LOG_DIS("xor r%d, r%d, r%d\n", dc->r2, dc->r0, dc->r1);
    }

    if (dc->format == OP_FMT_RI) {
        tcg_gen_xori_tl(cpu_R[dc->r1], cpu_R[dc->r0],
                        zero_extend(dc->imm16, 16));
    } else {
        tcg_gen_xor_tl(cpu_R[dc->r2], cpu_R[dc->r0], cpu_R[dc->r1]);
    }
}

static void dec_ill(DisasContext *dc)
{
    qemu_log_mask(LOG_GUEST_ERROR, "invalid opcode 0x%02x\n", dc->opcode);
    t_gen_illegal_insn(dc);
}

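/*
 * Decoder dispatch table, indexed by the 6-bit opcode.  Entries 0..31
 * (instruction bit 31 clear) are the register/immediate forms, entries
 * 32..63 (bit 31 set) the register/register forms.
 */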
typedef void (*DecoderInfo)(DisasContext *dc);
static const DecoderInfo decinfo[] = {
    dec_sru, dec_nor, dec_mul, dec_sh, dec_lb, dec_sr, dec_xor, dec_lh,
    dec_and, dec_xnor, dec_lw, dec_lhu, dec_sb, dec_add, dec_or, dec_sl,
    dec_lbu, dec_be, dec_bg, dec_bge, dec_bgeu, dec_bgu, dec_sw, dec_bne,
    dec_andhi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_orhi,
    dec_cmpne,
    dec_sru, dec_nor, dec_mul, dec_divu, dec_rcsr, dec_sr, dec_xor, dec_ill,
    dec_and, dec_xnor, dec_ill, dec_scall, dec_sextb, dec_add, dec_or, dec_sl,
    dec_b, dec_modu, dec_sub, dec_user, dec_wcsr, dec_ill, dec_call, dec_sexth,
    dec_bi, dec_cmpe, dec_cmpg, dec_cmpge, dec_cmpgeu, dec_cmpgu, dec_calli,
    dec_cmpne
};

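/*
 * Instructions are 32 bits wide.  Bits 31..26 hold the opcode; all other
 * candidate fields (register numbers, CSR index and the 5/16/26-bit
 * immediates) are extracted up front and the per-opcode handler uses
 * whichever of them apply to its format.
 */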
static inline void decode(DisasContext *dc, uint32_t ir)
{
    dc->ir = ir;
    LOG_DIS("%8.8x\t", dc->ir);

    dc->opcode = EXTRACT_FIELD(ir, 26, 31);

    dc->imm5 = EXTRACT_FIELD(ir, 0, 4);
    dc->imm16 = EXTRACT_FIELD(ir, 0, 15);
    dc->imm26 = EXTRACT_FIELD(ir, 0, 25);

    dc->csr = EXTRACT_FIELD(ir, 21, 25);
    dc->r0 = EXTRACT_FIELD(ir, 21, 25);
    dc->r1 = EXTRACT_FIELD(ir, 16, 20);
    dc->r2 = EXTRACT_FIELD(ir, 11, 15);

    /* bit 31 seems to indicate insn type. */
    if (ir & (1 << 31)) {
        dc->format = OP_FMT_RR;
    } else {
        dc->format = OP_FMT_RI;
    }

    assert(ARRAY_SIZE(decinfo) == 64);
    assert(dc->opcode < 64);

    decinfo[dc->opcode](dc);
}

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int max_insns)
{
    CPULM32State *env = cs->env_ptr;
    LM32CPU *cpu = env_archcpu(env);
    struct DisasContext ctx, *dc = &ctx;
    uint32_t pc_start;
    uint32_t page_start;
    int num_insns;

    pc_start = tb->pc;
    dc->features = cpu->features;
    dc->num_breakpoints = cpu->num_breakpoints;
    dc->num_watchpoints = cpu->num_watchpoints;
    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;

    if (pc_start & 3) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unaligned PC=%x. Ignoring lowest bits.\n", pc_start);
        pc_start &= ~3;
    }

    page_start = pc_start & TARGET_PAGE_MASK;
    num_insns = 0;

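    /* Translate one instruction at a time until the block ends: a branch,
     * a full TCG op buffer, a single-step request, a page boundary or the
     * per-TB instruction limit. */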
    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
            t_gen_raise_exception(dc, EXCP_DEBUG);
            dc->is_jmp = DISAS_UPDATE;
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing. */
            dc->pc += 4;
            break;
        }

        /* Pretty disas. */
        LOG_DIS("%8.8x:\t", dc->pc);

        if (num_insns == max_insns && (tb_cflags(tb) & CF_LAST_IO)) {
            gen_io_start();
        }

        decode(dc, cpu_ldl_code(env, dc->pc));
        dc->pc += 4;
    } while (!dc->is_jmp
             && !tcg_op_buf_full()
             && !cs->singlestep_enabled
             && !singlestep
             && (dc->pc - page_start < TARGET_PAGE_SIZE)
             && num_insns < max_insns);


    if (unlikely(cs->singlestep_enabled)) {
        if (dc->is_jmp == DISAS_NEXT) {
            tcg_gen_movi_tl(cpu_pc, dc->pc);
        }
        t_gen_raise_exception(dc, EXCP_DEBUG);
    } else {
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used
               to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        }
    }

    gen_tb_end(tb, num_insns);

    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        qemu_log_lock();
        qemu_log("\n");
        log_target_disas(cs, pc_start, dc->pc - pc_start);
        qemu_log_unlock();
    }
#endif
}

void lm32_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    LM32CPU *cpu = LM32_CPU(cs);
    CPULM32State *env = &cpu->env;
    int i;

    if (!env) {
        return;
    }

    qemu_fprintf(f, "IN: PC=%x %s\n",
                 env->pc, lookup_symbol(env->pc));

    qemu_fprintf(f, "ie=%8.8x (IE=%x EIE=%x BIE=%x) im=%8.8x ip=%8.8x\n",
                 env->ie,
                 (env->ie & IE_IE) ? 1 : 0,
                 (env->ie & IE_EIE) ? 1 : 0,
                 (env->ie & IE_BIE) ? 1 : 0,
                 lm32_pic_get_im(env->pic_state),
                 lm32_pic_get_ip(env->pic_state));
    qemu_fprintf(f, "eba=%8.8x deba=%8.8x\n",
                 env->eba,
                 env->deba);

    for (i = 0; i < 32; i++) {
        qemu_fprintf(f, "r%2.2d=%8.8x ", i, env->regs[i]);
        if ((i + 1) % 4 == 0) {
            qemu_fprintf(f, "\n");
        }
    }
    qemu_fprintf(f, "\n\n");
}

void restore_state_to_opc(CPULM32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->pc = data[0];
}

void lm32_translate_init(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(cpu_R); i++) {
        cpu_R[i] = tcg_global_mem_new(cpu_env,
                                      offsetof(CPULM32State, regs[i]),
                                      regnames[i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_bp); i++) {
        cpu_bp[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPULM32State, bp[i]),
                                       regnames[32 + i]);
    }

    for (i = 0; i < ARRAY_SIZE(cpu_wp); i++) {
        cpu_wp[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPULM32State, wp[i]),
                                       regnames[36 + i]);
    }

    cpu_pc = tcg_global_mem_new(cpu_env,
                                offsetof(CPULM32State, pc),
                                "pc");
    cpu_ie = tcg_global_mem_new(cpu_env,
                                offsetof(CPULM32State, ie),
                                "ie");
    cpu_icc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPULM32State, icc),
                                 "icc");
    cpu_dcc = tcg_global_mem_new(cpu_env,
                                 offsetof(CPULM32State, dcc),
                                 "dcc");
    cpu_cc = tcg_global_mem_new(cpu_env,
                                offsetof(CPULM32State, cc),
                                "cc");
    cpu_cfg = tcg_global_mem_new(cpu_env,
                                 offsetof(CPULM32State, cfg),
                                 "cfg");
    cpu_eba = tcg_global_mem_new(cpu_env,
                                 offsetof(CPULM32State, eba),
                                 "eba");
    cpu_dc = tcg_global_mem_new(cpu_env,
                                offsetof(CPULM32State, dc),
                                "dc");
    cpu_deba = tcg_global_mem_new(cpu_env,
                                  offsetof(CPULM32State, deba),
                                  "deba");
}
