/*
 * MIPS internal definitions and helpers
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef MIPS_INTERNAL_H
#define MIPS_INTERNAL_H

#include "fpu/softfloat-helpers.h"

/*
 * MMU types, the first four entries have the same layout as the
 * CP0C0_MT field.
 */
enum mips_mmu_types {
    MMU_TYPE_NONE,
    MMU_TYPE_R4000,
    MMU_TYPE_RESERVED,
    MMU_TYPE_FMT,
    MMU_TYPE_R3000,
    MMU_TYPE_R6000,
    MMU_TYPE_R8000
};

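/*
 * Definition of a CPU model: reset values and read/write masks for its
 * CP0 and CP1 registers, plus the supported ISA/ASE feature flags and
 * MMU type. The available models are listed in the mips_defs[] table.
 */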
struct mips_def_t {
    const char *name;
    int32_t CP0_PRid;
    int32_t CP0_Config0;
    int32_t CP0_Config1;
    int32_t CP0_Config2;
    int32_t CP0_Config3;
    int32_t CP0_Config4;
    int32_t CP0_Config4_rw_bitmask;
    int32_t CP0_Config5;
    int32_t CP0_Config5_rw_bitmask;
    int32_t CP0_Config6;
    int32_t CP0_Config7;
    target_ulong CP0_LLAddr_rw_bitmask;
    int CP0_LLAddr_shift;
    int32_t SYNCI_Step;
    int32_t CCRes;
    int32_t CP0_Status_rw_bitmask;
    int32_t CP0_TCStatus_rw_bitmask;
    int32_t CP0_SRSCtl;
    int32_t CP1_fcr0;
    int32_t CP1_fcr31_rw_bitmask;
    int32_t CP1_fcr31;
    int32_t MSAIR;
    int32_t SEGBITS;
    int32_t PABITS;
    int32_t CP0_SRSConf0_rw_bitmask;
    int32_t CP0_SRSConf0;
    int32_t CP0_SRSConf1_rw_bitmask;
    int32_t CP0_SRSConf1;
    int32_t CP0_SRSConf2_rw_bitmask;
    int32_t CP0_SRSConf2;
    int32_t CP0_SRSConf3_rw_bitmask;
    int32_t CP0_SRSConf3;
    int32_t CP0_SRSConf4_rw_bitmask;
    int32_t CP0_SRSConf4;
    int32_t CP0_PageGrain_rw_bitmask;
    int32_t CP0_PageGrain;
    target_ulong CP0_EBaseWG_rw_bitmask;
    uint64_t insn_flags;
    enum mips_mmu_types mmu_type;
    int32_t SAARP;
};

extern const struct mips_def_t mips_defs[];
extern const int mips_defs_number;

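/* MSA vector element widths, as encoded in the instruction df field. */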
enum CPUMIPSMSADataFormat {
    DF_BYTE = 0,
    DF_HALF,
    DF_WORD,
    DF_DOUBLE
};

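/* QOM CPUClass hooks for the MIPS target. */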
void mips_cpu_do_interrupt(CPUState *cpu);
bool mips_cpu_exec_interrupt(CPUState *cpu, int int_req);
void mips_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
hwaddr mips_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
int mips_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int mips_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void mips_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
                                  MMUAccessType access_type,
                                  int mmu_idx, uintptr_t retaddr);

#if !defined(CONFIG_USER_ONLY)

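/*
 * One R4000-style TLB entry: a VPN/ASID match that maps an even/odd pair
 * of physical pages, with per-page cache attribute (C), valid (V), dirty
 * (D), execute-inhibit (XI) and read-inhibit (RI) bits, plus the EHINV
 * invalidation flag.
 */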
typedef struct r4k_tlb_t r4k_tlb_t;
struct r4k_tlb_t {
    target_ulong VPN;
    uint32_t PageMask;
    uint16_t ASID;
    unsigned int G:1;
    unsigned int C0:3;
    unsigned int C1:3;
    unsigned int V0:1;
    unsigned int V1:1;
    unsigned int D0:1;
    unsigned int D1:1;
    unsigned int XI0:1;
    unsigned int XI1:1;
    unsigned int RI0:1;
    unsigned int RI1:1;
    unsigned int EHINV:1;
    uint64_t PFN[2];
};

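/*
 * Per-CPU MMU context: the R4000-style TLB array plus function pointers
 * so the no-MMU, fixed-mapping and R4000-style translation models can
 * share the same call sites.
 */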
struct CPUMIPSTLBContext {
    uint32_t nb_tlb;
    uint32_t tlb_in_use;
    int (*map_address)(struct CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type);
    void (*helper_tlbwi)(struct CPUMIPSState *env);
    void (*helper_tlbwr)(struct CPUMIPSState *env);
    void (*helper_tlbp)(struct CPUMIPSState *env);
    void (*helper_tlbr)(struct CPUMIPSState *env);
    void (*helper_tlbinv)(struct CPUMIPSState *env);
    void (*helper_tlbinvf)(struct CPUMIPSState *env);
    union {
        struct {
            r4k_tlb_t tlb[MIPS_TLB_MAX];
        } r4k;
    } mmu;
};

int no_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                       target_ulong address, int rw, int access_type);
int fixed_mmu_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                          target_ulong address, int rw, int access_type);
int r4k_map_address(CPUMIPSState *env, hwaddr *physical, int *prot,
                    target_ulong address, int rw, int access_type);
void r4k_helper_tlbwi(CPUMIPSState *env);
void r4k_helper_tlbwr(CPUMIPSState *env);
void r4k_helper_tlbp(CPUMIPSState *env);
void r4k_helper_tlbr(CPUMIPSState *env);
void r4k_helper_tlbinv(CPUMIPSState *env);
void r4k_helper_tlbinvf(CPUMIPSState *env);
void r4k_invalidate_tlb(CPUMIPSState *env, int idx, int use_extra);

void mips_cpu_unassigned_access(CPUState *cpu, hwaddr addr,
                                bool is_write, bool is_exec, int unused,
                                unsigned size);
hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address,
                                  int rw);
#endif /* !CONFIG_USER_ONLY */

#define cpu_signal_handler cpu_mips_signal_handler

#ifndef CONFIG_USER_ONLY
extern const VMStateDescription vmstate_mips_cpu;
#endif

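/*
 * Hardware interrupts can be taken only when Status.IE is set and the CPU
 * is not at exception level (EXL), error level (ERL) or in debug mode, and
 * the current TC has not masked interrupts via TCStatus.IXMT.
 */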
static inline bool cpu_mips_hw_interrupts_enabled(CPUMIPSState *env)
{
    return (env->CP0_Status & (1 << CP0St_IE)) &&
        !(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM) &&
        /*
         * Note that the TCStatus IXMT field is initialized to zero,
         * and only MT capable cores can set it to one. So we don't
         * need to check for MT capabilities here.
         */
        !(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_IXMT));
}

/* Check if there is a pending and not masked-out interrupt. */
static inline bool cpu_mips_hw_interrupts_pending(CPUMIPSState *env)
{
    int32_t pending;
    int32_t status;
    bool r;

    pending = env->CP0_Cause & CP0Ca_IP_mask;
    status = env->CP0_Status & CP0Ca_IP_mask;

    if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
        /*
         * A MIPS configured with a vectorizing external interrupt controller
         * will feed a vector into the Cause pending lines. The core treats
         * the status lines as a vector level, not as individual masks.
         */
        r = pending > status;
    } else {
        /*
         * A MIPS configured with compatibility or VInt (Vectored Interrupts)
         * treats the pending lines as individual interrupt lines, and the
         * status lines as individual masks.
         */
        r = (pending & status) != 0;
    }
    return r;
}

void mips_tcg_init(void);

/* TODO QOM'ify CPU reset and remove */
void cpu_state_reset(CPUMIPSState *s);
void cpu_mips_realize_env(CPUMIPSState *env);

/* cp0_timer.c */
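/*
 * Helpers behind the CP0 Count/Compare timer: Count follows the virtual
 * clock and a timer interrupt is raised when it reaches Compare.
 * cpu_mips_get_random() supplies the pseudo-random TLB index reported by
 * the CP0 Random register.
 */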
uint32_t cpu_mips_get_random(CPUMIPSState *env);
uint32_t cpu_mips_get_count(CPUMIPSState *env);
void cpu_mips_store_count(CPUMIPSState *env, uint32_t value);
void cpu_mips_store_compare(CPUMIPSState *env, uint32_t value);
void cpu_mips_start_count(CPUMIPSState *env);
void cpu_mips_stop_count(CPUMIPSState *env);

/* helper.c */
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr);

/* op_helper.c */
uint32_t float_class_s(uint32_t arg, float_status *fst);
uint64_t float_class_d(uint64_t arg, float_status *fst);

extern unsigned int ieee_rm[];
int ieee_ex_to_mips(int xcpt);
void update_pagemask(CPUMIPSState *env, target_ulong arg1, int32_t *pagemask);

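/*
 * The restore_* helpers below propagate guest FP control state (the FCR31
 * rounding mode, flush-to-zero and NAN2008 bits, and the MSA MSACSR
 * fields) into the softfloat float_status words; they must be re-run
 * whenever those registers are written.
 */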
static inline void restore_rounding_mode(CPUMIPSState *env)
{
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}

static inline void restore_flush_mode(CPUMIPSState *env)
{
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << FCR31_FS)) != 0,
                      &env->active_fpu.fp_status);
}

static inline void restore_snan_bit_mode(CPUMIPSState *env)
{
    set_snan_bit_is_one((env->active_fpu.fcr31 & (1 << FCR31_NAN2008)) == 0,
                        &env->active_fpu.fp_status);
}

static inline void restore_fp_status(CPUMIPSState *env)
{
    restore_rounding_mode(env);
    restore_flush_mode(env);
    restore_snan_bit_mode(env);
}

static inline void restore_msa_fp_status(CPUMIPSState *env)
{
    float_status *status = &env->active_tc.msa_fp_status;
    int rounding_mode = (env->active_tc.msacsr & MSACSR_RM_MASK) >> MSACSR_RM;
    bool flush_to_zero = (env->active_tc.msacsr & MSACSR_FS_MASK) != 0;

    set_float_rounding_mode(ieee_rm[rounding_mode], status);
    set_flush_to_zero(flush_to_zero, status);
    set_flush_inputs_to_zero(flush_to_zero, status);
}

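/*
 * PAMask limits the physical addresses produced by the MMU: the full
 * PABITS-wide mask when large physical addressing (ELPA) is active,
 * otherwise the default PAMASK_BASE.
 */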
static inline void restore_pamask(CPUMIPSState *env)
{
    if (env->hflags & MIPS_HFLAG_ELPA) {
        env->PAMask = (1ULL << env->PABITS) - 1;
    } else {
        env->PAMask = PAMASK_BASE;
    }
}

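/*
 * A VPE (MT ASE virtual processing element) is runnable only if it is
 * enabled (MVPControl.EVP), activated (VPEConf0.VPA), and its active TC
 * is activated and not halted.
 */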
static inline int mips_vpe_active(CPUMIPSState *env)
{
    int active = 1;

    /* Check that the VPE is enabled. */
    if (!(env->mvp->CP0_MVPControl & (1 << CP0MVPCo_EVP))) {
        active = 0;
    }
    /* Check that the VPE is activated. */
    if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))) {
        active = 0;
    }

    /*
     * Now verify that there are active thread contexts in the VPE.
     *
     * This assumes the CPU model will internally reschedule threads
     * if the active one goes to sleep. If there are no threads available
     * the active one will be in a sleeping state, and we can turn off
     * the entire VPE.
     */
    if (!(env->active_tc.CP0_TCStatus & (1 << CP0TCSt_A))) {
        /* TC is not activated. */
        active = 0;
    }
    if (env->active_tc.CP0_TCHalt & 1) {
        /* TC is in halt state. */
        active = 0;
    }

    return active;
}

static inline int mips_vp_active(CPUMIPSState *env)
{
    CPUState *other_cs = first_cpu;

    /* Check if the VP disabled other VPs (which means the VP is enabled). */
    if ((env->CP0_VPControl >> CP0VPCtl_DIS) & 1) {
        return 1;
    }

    /* Check if the virtual processor is disabled due to a DVP. */
    CPU_FOREACH(other_cs) {
        MIPSCPU *other_cpu = MIPS_CPU(other_cs);
        if ((&other_cpu->env != env) &&
            ((other_cpu->env.CP0_VPControl >> CP0VPCtl_DIS) & 1)) {
            return 0;
        }
    }
    return 1;
}

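/*
 * Recompute the cached hflags from CP0 state (Status, Config3/5, PageGrain)
 * and FCR0. The hflags summarise the execution mode bits the translator
 * relies on, so this must be called whenever any of those registers change.
 */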
static inline void compute_hflags(CPUMIPSState *env)
{
    env->hflags &= ~(MIPS_HFLAG_COP1X | MIPS_HFLAG_64 | MIPS_HFLAG_CP0 |
                     MIPS_HFLAG_F64 | MIPS_HFLAG_FPU | MIPS_HFLAG_KSU |
                     MIPS_HFLAG_AWRAP | MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                     MIPS_HFLAG_DSP_R3 | MIPS_HFLAG_SBRI | MIPS_HFLAG_MSA |
                     MIPS_HFLAG_FRE | MIPS_HFLAG_ELPA | MIPS_HFLAG_ERL);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        env->hflags |= MIPS_HFLAG_ERL;
    }
    if (!(env->CP0_Status & (1 << CP0St_EXL)) &&
        !(env->CP0_Status & (1 << CP0St_ERL)) &&
        !(env->hflags & MIPS_HFLAG_DM)) {
        env->hflags |= (env->CP0_Status >> CP0St_KSU) & MIPS_HFLAG_KSU;
    }
#if defined(TARGET_MIPS64)
    if ((env->insn_flags & ISA_MIPS3) &&
        (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_UM) ||
         (env->CP0_Status & (1 << CP0St_PX)) ||
         (env->CP0_Status & (1 << CP0St_UX)))) {
        env->hflags |= MIPS_HFLAG_64;
    }

    if (!(env->insn_flags & ISA_MIPS3)) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_UM) &&
               !(env->CP0_Status & (1 << CP0St_UX))) {
        env->hflags |= MIPS_HFLAG_AWRAP;
    } else if (env->insn_flags & ISA_MIPS64R6) {
        /* Address wrapping for Supervisor and Kernel is specified in R6. */
        if ((((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_SM) &&
             !(env->CP0_Status & (1 << CP0St_SX))) ||
            (((env->hflags & MIPS_HFLAG_KSU) == MIPS_HFLAG_KM) &&
             !(env->CP0_Status & (1 << CP0St_KX)))) {
            env->hflags |= MIPS_HFLAG_AWRAP;
        }
    }
#endif
    if (((env->CP0_Status & (1 << CP0St_CU0)) &&
         !(env->insn_flags & ISA_MIPS32R6)) ||
        !(env->hflags & MIPS_HFLAG_KSU)) {
        env->hflags |= MIPS_HFLAG_CP0;
    }
    if (env->CP0_Status & (1 << CP0St_CU1)) {
        env->hflags |= MIPS_HFLAG_FPU;
    }
    if (env->CP0_Status & (1 << CP0St_FR)) {
        env->hflags |= MIPS_HFLAG_F64;
    }
    if (((env->hflags & MIPS_HFLAG_KSU) != MIPS_HFLAG_KM) &&
        (env->CP0_Config5 & (1 << CP0C5_SBRI))) {
        env->hflags |= MIPS_HFLAG_SBRI;
    }
    if (env->insn_flags & ASE_DSP_R3) {
        /*
         * Our CPU supports DSP R3 ASE, so enable
         * access to DSP R3 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2 |
                           MIPS_HFLAG_DSP_R3;
        }
    } else if (env->insn_flags & ASE_DSP_R2) {
        /*
         * Our CPU supports DSP R2 ASE, so enable
         * access to DSP R2 resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP | MIPS_HFLAG_DSP_R2;
        }
    } else if (env->insn_flags & ASE_DSP) {
        /*
         * Our CPU supports DSP ASE, so enable
         * access to DSP resources.
         */
        if (env->CP0_Status & (1 << CP0St_MX)) {
            env->hflags |= MIPS_HFLAG_DSP;
        }
    }
    if (env->insn_flags & ISA_MIPS32R2) {
        if (env->active_fpu.fcr0 & (1 << FCR0_F64)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS32) {
        if (env->hflags & MIPS_HFLAG_64) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    } else if (env->insn_flags & ISA_MIPS4) {
        /*
         * All supported MIPS IV CPUs use the XX (CU3) bit to enable
         * and disable the MIPS IV extensions to the MIPS III ISA.
         * Some other MIPS IV CPUs ignore the bit, so the check here
         * would be too restrictive for them.
         */
        if (env->CP0_Status & (1U << CP0St_CU3)) {
            env->hflags |= MIPS_HFLAG_COP1X;
        }
    }
    if (env->insn_flags & ASE_MSA) {
        if (env->CP0_Config5 & (1 << CP0C5_MSAEn)) {
            env->hflags |= MIPS_HFLAG_MSA;
        }
    }
    if (env->active_fpu.fcr0 & (1 << FCR0_FREP)) {
        if (env->CP0_Config5 & (1 << CP0C5_FRE)) {
            env->hflags |= MIPS_HFLAG_FRE;
        }
    }
    if (env->CP0_Config3 & (1 << CP0C3_LPA)) {
        if (env->CP0_PageGrain & (1 << CP0PG_ELPA)) {
            env->hflags |= MIPS_HFLAG_ELPA;
        }
    }
}

void cpu_mips_tlb_flush(CPUMIPSState *env);
void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc);
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val);
void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val);

void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, uint32_t exception,
                                          int error_code, uintptr_t pc);

static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
                                                    uint32_t exception,
                                                    uintptr_t pc)
{
    do_raise_exception_err(env, exception, 0, pc);
}

#endif /* MIPS_INTERNAL_H */