1 | /* |
2 | * MIPS emulation helpers for qemu. |
3 | * |
4 | * Copyright (c) 2004-2005 Jocelyn Mayer |
5 | * |
6 | * This library is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2 of the License, or (at your option) any later version. |
10 | * |
11 | * This library is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
18 | */ |
19 | #include "qemu/osdep.h" |
20 | |
21 | #include "cpu.h" |
22 | #include "internal.h" |
23 | #include "exec/exec-all.h" |
24 | #include "exec/cpu_ldst.h" |
25 | #include "exec/log.h" |
26 | #include "hw/mips/cpudevs.h" |
27 | #include "qapi/qapi-commands-machine-target.h" |
28 | |
/*
 * Result codes of the MMU translation routines.  TLBRET_MATCH (zero)
 * means success; the negative codes identify the fault so the caller
 * can raise the corresponding MIPS exception (see raise_mmu_exception).
 */
enum {
    TLBRET_XI = -6,      /* TLB match, but execute-inhibit (XI) is set */
    TLBRET_RI = -5,      /* TLB match, but read-inhibit (RI) is set */
    TLBRET_DIRTY = -4,   /* TLB match on a store, but D bit is clear */
    TLBRET_INVALID = -3, /* TLB match with the valid (V) bit clear */
    TLBRET_NOMATCH = -2, /* no TLB entry matched the address */
    TLBRET_BADADDR = -1, /* address error for the current mode */
    TLBRET_MATCH = 0     /* successful translation */
};
38 | |
39 | #if !defined(CONFIG_USER_ONLY) |
40 | |
41 | /* no MMU emulation */ |
42 | int no_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, |
43 | target_ulong address, int rw, int access_type) |
44 | { |
45 | *physical = address; |
46 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
47 | return TLBRET_MATCH; |
48 | } |
49 | |
50 | /* fixed mapping MMU emulation */ |
51 | int fixed_mmu_map_address (CPUMIPSState *env, hwaddr *physical, int *prot, |
52 | target_ulong address, int rw, int access_type) |
53 | { |
54 | if (address <= (int32_t)0x7FFFFFFFUL) { |
55 | if (!(env->CP0_Status & (1 << CP0St_ERL))) |
56 | *physical = address + 0x40000000UL; |
57 | else |
58 | *physical = address; |
59 | } else if (address <= (int32_t)0xBFFFFFFFUL) |
60 | *physical = address & 0x1FFFFFFF; |
61 | else |
62 | *physical = address; |
63 | |
64 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
65 | return TLBRET_MATCH; |
66 | } |
67 | |
/* MIPS32/MIPS64 R4000-style MMU emulation */
/*
 * Probe the software-modelled TLB (including shadow entries up to
 * tlb_in_use) for 'address' and, on a hit, apply the per-entry access
 * checks.  Returns a TLBRET_* code and fills *physical / *prot on match.
 */
int r4k_map_address (CPUMIPSState *env, hwaddr *physical, int *prot,
                     target_ulong address, int rw, int access_type)
{
    /* Current ASID, compared against each non-global TLB entry. */
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    int i;

    for (i = 0; i < env->tlb->tlb_in_use; i++) {
        r4k_tlb_t *tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        target_ulong mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        target_ulong tag = address & ~mask;
        target_ulong VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif

        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag && !tlb->EHINV) {
            /* TLB match */
            /* n selects the even (0) or odd (1) page of the entry pair. */
            int n = !!(address & mask & ~(mask >> 1));
            /* Check access rights */
            if (!(n ? tlb->V1 : tlb->V0)) {
                return TLBRET_INVALID;
            }
            if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
                return TLBRET_XI;
            }
            if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
                return TLBRET_RI;
            }
            if (rw != MMU_DATA_STORE || (n ? tlb->D1 : tlb->D0)) {
                /* Loads/fetches, or stores to a dirty page, succeed. */
                *physical = tlb->PFN[n] | (address & (mask >> 1));
                *prot = PAGE_READ;
                if (n ? tlb->D1 : tlb->D0)
                    *prot |= PAGE_WRITE;
                if (!(n ? tlb->XI1 : tlb->XI0)) {
                    *prot |= PAGE_EXEC;
                }
                return TLBRET_MATCH;
            }
            /* Store to a clean page: TLB Modified fault. */
            return TLBRET_DIRTY;
        }
    }
    return TLBRET_NOMATCH;
}
114 | |
/*
 * Decide whether a segment with access mode 'am' is TLB mapped, unmapped,
 * or illegal (address error) for the execution mode given by mmu_idx.
 * Returns 1 (mapped), 0 (unmapped) or TLBRET_BADADDR.
 */
static int is_seg_am_mapped(unsigned int am, bool eu, int mmu_idx)
{
    /*
     * Interpret access control mode and mmu_idx.
     *           AdE?     TLB?
     *      AM  K S U E  K S U E
     * UK    0  0 1 1 0  0 - - 0
     * MK    1  0 1 1 0  1 - - !eu
     * MSK   2  0 0 1 0  1 1 - !eu
     * MUSK  3  0 0 0 0  1 1 1 !eu
     * MUSUK 4  0 0 0 0  0 1 1 0
     * USK   5  0 0 1 0  0 0 - 0
     * -     6  - - - -  - - - -
     * UUSK  7  0 0 0 0  0 0 0 0
     */
    /* Two packed bitmaps, one per byte group: the upper byte encodes
       "AdE for AM=n" and (after the <<= 8) the next encodes "mapped". */
    int32_t adetlb_mask;

    switch (mmu_idx) {
    case 3 /* ERL */:
        /* If EU is set, always unmapped */
        if (eu) {
            return 0;
        }
        /* fall through */
    case MIPS_HFLAG_KM:
        /* Never AdE, TLB mapped if AM={1,2,3} */
        adetlb_mask = 0x70000000;
        goto check_tlb;

    case MIPS_HFLAG_SM:
        /* AdE if AM={0,1}, TLB mapped if AM={2,3,4} */
        adetlb_mask = 0xc0380000;
        goto check_ade;

    case MIPS_HFLAG_UM:
        /* AdE if AM={0,1,2,5}, TLB mapped if AM={3,4} */
        adetlb_mask = 0xe4180000;
        /* fall through */
    check_ade:
        /* does this AM cause AdE in current execution mode */
        /* Shift AM's bit into the sign position and test it. */
        if ((adetlb_mask << am) < 0) {
            return TLBRET_BADADDR;
        }
        /* Drop the AdE byte group; next group encodes "TLB mapped". */
        adetlb_mask <<= 8;
        /* fall through */
    check_tlb:
        /* is this AM mapped in current execution mode */
        return ((adetlb_mask << am) < 0);
    default:
        assert(0);
        return TLBRET_BADADDR;
    };
}
168 | |
169 | static int get_seg_physical_address(CPUMIPSState *env, hwaddr *physical, |
170 | int *prot, target_ulong real_address, |
171 | int rw, int access_type, int mmu_idx, |
172 | unsigned int am, bool eu, |
173 | target_ulong segmask, |
174 | hwaddr physical_base) |
175 | { |
176 | int mapped = is_seg_am_mapped(am, eu, mmu_idx); |
177 | |
178 | if (mapped < 0) { |
179 | /* is_seg_am_mapped can report TLBRET_BADADDR */ |
180 | return mapped; |
181 | } else if (mapped) { |
182 | /* The segment is TLB mapped */ |
183 | return env->tlb->map_address(env, physical, prot, real_address, rw, |
184 | access_type); |
185 | } else { |
186 | /* The segment is unmapped */ |
187 | *physical = physical_base | (real_address & segmask); |
188 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
189 | return TLBRET_MATCH; |
190 | } |
191 | } |
192 | |
193 | static int get_segctl_physical_address(CPUMIPSState *env, hwaddr *physical, |
194 | int *prot, target_ulong real_address, |
195 | int rw, int access_type, int mmu_idx, |
196 | uint16_t segctl, target_ulong segmask) |
197 | { |
198 | unsigned int am = (segctl & CP0SC_AM_MASK) >> CP0SC_AM; |
199 | bool eu = (segctl >> CP0SC_EU) & 1; |
200 | hwaddr pa = ((hwaddr)segctl & CP0SC_PA_MASK) << 20; |
201 | |
202 | return get_seg_physical_address(env, physical, prot, real_address, rw, |
203 | access_type, mmu_idx, am, eu, segmask, |
204 | pa & ~(hwaddr)segmask); |
205 | } |
206 | |
/*
 * Translate a virtual address into a physical address and protection
 * flags according to the MIPS segment map, the CP0 SegCtl registers and
 * the current execution mode (mmu_idx); mapped segments are resolved by
 * the active MMU model.  Returns a TLBRET_* code.
 */
static int get_physical_address (CPUMIPSState *env, hwaddr *physical,
                                 int *prot, target_ulong real_address,
                                 int rw, int access_type, int mmu_idx)
{
    /* User mode can only access useg/xuseg */
#if defined(TARGET_MIPS64)
    int user_mode = mmu_idx == MIPS_HFLAG_UM;
    int supervisor_mode = mmu_idx == MIPS_HFLAG_SM;
    int kernel_mode = !user_mode && !supervisor_mode;
    /* CP0 Status bits enabling the 64-bit address segments */
    int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
    int SX = (env->CP0_Status & (1 << CP0St_SX)) != 0;
    int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;
#endif
    int ret = TLBRET_MATCH;
    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

#define USEG_LIMIT      ((target_ulong)(int32_t)0x7FFFFFFFUL)
#define KSEG0_BASE      ((target_ulong)(int32_t)0x80000000UL)
#define KSEG1_BASE      ((target_ulong)(int32_t)0xA0000000UL)
#define KSEG2_BASE      ((target_ulong)(int32_t)0xC0000000UL)
#define KSEG3_BASE      ((target_ulong)(int32_t)0xE0000000UL)

#define KVM_KSEG0_BASE  ((target_ulong)(int32_t)0x40000000UL)
#define KVM_KSEG2_BASE  ((target_ulong)(int32_t)0x60000000UL)

    if (mips_um_ksegs_enabled()) {
        /* KVM T&E adds guest kernel segments in useg */
        if (real_address >= KVM_KSEG0_BASE) {
            if (real_address < KVM_KSEG2_BASE) {
                /* kseg0 */
                address += KSEG0_BASE - KVM_KSEG0_BASE;
            } else if (real_address <= USEG_LIMIT) {
                /* kseg2/3 */
                address += KSEG2_BASE - KVM_KSEG2_BASE;
            }
        }
    }

    if (address <= USEG_LIMIT) {
        /* useg */
        uint16_t segctl;

        /* The two 1 GiB useg sub-ranges are configured by the two
           16-bit halves of CP0 SegCtl2. */
        if (address >= 0x40000000UL) {
            segctl = env->CP0_SegCtl2;
        } else {
            segctl = env->CP0_SegCtl2 >> 16;
        }
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx, segctl,
                                          0x3FFFFFFF);
#if defined(TARGET_MIPS64)
    } else if (address < 0x4000000000000000ULL) {
        /* xuseg */
        if (UX && address <= (0x3FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0x8000000000000000ULL) {
        /* xsseg */
        if ((supervisor_mode || kernel_mode) &&
            SX && address <= (0x7FFFFFFFFFFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xC000000000000000ULL) {
        /* xkphys */
        if ((address & 0x07FFFFFFFFFFFFFFULL) <= env->PAMask) {
            /* KX/SX/UX bit to check for each xkphys EVA access mode */
            static const uint8_t am_ksux[8] = {
                [CP0SC_AM_UK]    = (1u << CP0St_KX),
                [CP0SC_AM_MK]    = (1u << CP0St_KX),
                [CP0SC_AM_MSK]   = (1u << CP0St_SX),
                [CP0SC_AM_MUSK]  = (1u << CP0St_UX),
                [CP0SC_AM_MUSUK] = (1u << CP0St_UX),
                [CP0SC_AM_USK]   = (1u << CP0St_SX),
                [6]              = (1u << CP0St_KX),
                [CP0SC_AM_UUSK]  = (1u << CP0St_UX),
            };
            unsigned int am = CP0SC_AM_UK;
            unsigned int xr = (env->CP0_SegCtl2 & CP0SC2_XR_MASK) >> CP0SC2_XR;

            /* If this xkphys region is enabled in SegCtl2.XR, take its
               access mode from SegCtl1.XAM; otherwise default to UK. */
            if (xr & (1 << ((address >> 59) & 0x7))) {
                am = (env->CP0_SegCtl1 & CP0SC1_XAM_MASK) >> CP0SC1_XAM;
            }
            /* Does CP0_Status.KX/SX/UX permit the access mode (am) */
            if (env->CP0_Status & am_ksux[am]) {
                ret = get_seg_physical_address(env, physical, prot,
                                               real_address, rw, access_type,
                                               mmu_idx, am, false, env->PAMask,
                                               0);
            } else {
                ret = TLBRET_BADADDR;
            }
        } else {
            ret = TLBRET_BADADDR;
        }
    } else if (address < 0xFFFFFFFF80000000ULL) {
        /* xkseg */
        if (kernel_mode && KX &&
            address <= (0xFFFFFFFF7FFFFFFFULL & env->SEGMask)) {
            ret = env->tlb->map_address(env, physical, prot, real_address, rw, access_type);
        } else {
            ret = TLBRET_BADADDR;
        }
#endif
    } else if (address < KSEG1_BASE) {
        /* kseg0 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1 >> 16, 0x1FFFFFFF);
    } else if (address < KSEG2_BASE) {
        /* kseg1 */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl1, 0x1FFFFFFF);
    } else if (address < KSEG3_BASE) {
        /* sseg (kseg2) */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0 >> 16, 0x1FFFFFFF);
    } else {
        /* kseg3 */
        /* XXX: debug segment is not emulated */
        ret = get_segctl_physical_address(env, physical, prot, real_address, rw,
                                          access_type, mmu_idx,
                                          env->CP0_SegCtl0, 0x1FFFFFFF);
    }
    return ret;
}
339 | |
/*
 * Flush QEMU's translation TLB for this CPU and reset tlb_in_use to
 * nb_tlb, making the shadow entries beyond the architectural TLB size
 * available again.
 */
void cpu_mips_tlb_flush(CPUMIPSState *env)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush(env_cpu(env));
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}
346 | |
347 | /* Called for updates to CP0_Status. */ |
348 | void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc) |
349 | { |
350 | int32_t tcstatus, *tcst; |
351 | uint32_t v = cpu->CP0_Status; |
352 | uint32_t cu, mx, asid, ksu; |
353 | uint32_t mask = ((1 << CP0TCSt_TCU3) |
354 | | (1 << CP0TCSt_TCU2) |
355 | | (1 << CP0TCSt_TCU1) |
356 | | (1 << CP0TCSt_TCU0) |
357 | | (1 << CP0TCSt_TMX) |
358 | | (3 << CP0TCSt_TKSU) |
359 | | (0xff << CP0TCSt_TASID)); |
360 | |
361 | cu = (v >> CP0St_CU0) & 0xf; |
362 | mx = (v >> CP0St_MX) & 0x1; |
363 | ksu = (v >> CP0St_KSU) & 0x3; |
364 | asid = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask; |
365 | |
366 | tcstatus = cu << CP0TCSt_TCU0; |
367 | tcstatus |= mx << CP0TCSt_TMX; |
368 | tcstatus |= ksu << CP0TCSt_TKSU; |
369 | tcstatus |= asid; |
370 | |
371 | if (tc == cpu->current_tc) { |
372 | tcst = &cpu->active_tc.CP0_TCStatus; |
373 | } else { |
374 | tcst = &cpu->tcs[tc].CP0_TCStatus; |
375 | } |
376 | |
377 | *tcst &= ~mask; |
378 | *tcst |= tcstatus; |
379 | compute_hflags(cpu); |
380 | } |
381 | |
/*
 * Write 'val' to CP0_Status, honouring the per-CPU writable-bit mask
 * and the extra constraints of Release 6, then resynchronise derived
 * state (TC status for MT cores, otherwise hflags).
 */
void cpu_mips_store_status(CPUMIPSState *env, target_ulong val)
{
    uint32_t mask = env->CP0_Status_rw_bitmask;
    target_ulong old = env->CP0_Status;

    if (env->insn_flags & ISA_MIPS32R6) {
        bool has_supervisor = extract32(mask, CP0St_KSU, 2) == 0x3;
#if defined(TARGET_MIPS64)
        uint32_t ksux = (1 << CP0St_KX) & val;
        ksux |= (ksux >> 1) & val; /* KX = 0 forces SX to be 0 */
        ksux |= (ksux >> 1) & val; /* SX = 0 forces UX to be 0 */
        val = (val & ~(7 << CP0St_UX)) | ksux;
#endif
        /* R6: ignore a write of the reserved KSU value 0b11. */
        if (has_supervisor && extract32(val, CP0St_KSU, 2) == 0x3) {
            mask &= ~(3 << CP0St_KSU);
        }
        /* R6: SR and NMI can only be cleared by software, never set. */
        mask &= ~(((1 << CP0St_SR) | (1 << CP0St_NMI)) & val);
    }

    env->CP0_Status = (old & ~mask) | (val & mask);
#if defined(TARGET_MIPS64)
    if ((env->CP0_Status ^ old) & (old & (7 << CP0St_UX))) {
        /* Access to at least one of the 64-bit segments has been disabled */
        tlb_flush(env_cpu(env));
    }
#endif
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        /* MT ASE present: propagate Status into the current TC. */
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }
}
414 | |
415 | void cpu_mips_store_cause(CPUMIPSState *env, target_ulong val) |
416 | { |
417 | uint32_t mask = 0x00C00300; |
418 | uint32_t old = env->CP0_Cause; |
419 | int i; |
420 | |
421 | if (env->insn_flags & ISA_MIPS32R2) { |
422 | mask |= 1 << CP0Ca_DC; |
423 | } |
424 | if (env->insn_flags & ISA_MIPS32R6) { |
425 | mask &= ~((1 << CP0Ca_WP) & val); |
426 | } |
427 | |
428 | env->CP0_Cause = (env->CP0_Cause & ~mask) | (val & mask); |
429 | |
430 | if ((old ^ env->CP0_Cause) & (1 << CP0Ca_DC)) { |
431 | if (env->CP0_Cause & (1 << CP0Ca_DC)) { |
432 | cpu_mips_stop_count(env); |
433 | } else { |
434 | cpu_mips_start_count(env); |
435 | } |
436 | } |
437 | |
438 | /* Set/reset software interrupts */ |
439 | for (i = 0 ; i < 2 ; i++) { |
440 | if ((old ^ env->CP0_Cause) & (1 << (CP0Ca_IP + i))) { |
441 | cpu_mips_soft_irq(env, i, env->CP0_Cause & (1 << (CP0Ca_IP + i))); |
442 | } |
443 | } |
444 | } |
445 | #endif |
446 | |
/*
 * Record the architectural fault state for a failed translation
 * (BadVAddr, Context, EntryHi and, on 64-bit, XContext) and select the
 * exception matching the TLBRET_* code.  The exception index and error
 * code are stored; the caller actually raises the exception.
 */
static void raise_mmu_exception(CPUMIPSState *env, target_ulong address,
                                int rw, int tlb_error)
{
    CPUState *cs = env_cpu(env);
    int exception = 0, error_code = 0;

    if (rw == MMU_INST_FETCH) {
        error_code |= EXCP_INST_NOTAVAIL;
    }

    switch (tlb_error) {
    default:
    case TLBRET_BADADDR:
        /* Reference to kernel address from user mode or supervisor mode */
        /* Reference to supervisor address from user mode */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_AdES;
        } else {
            exception = EXCP_AdEL;
        }
        break;
    case TLBRET_NOMATCH:
        /* No TLB match for a mapped address */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        error_code |= EXCP_TLB_NOMATCH;
        break;
    case TLBRET_INVALID:
        /* TLB match with no valid bit */
        if (rw == MMU_DATA_STORE) {
            exception = EXCP_TLBS;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_DIRTY:
        /* TLB match but 'D' bit is cleared */
        exception = EXCP_LTLBL;
        break;
    case TLBRET_XI:
        /* Execute-Inhibit Exception */
        /* PageGrain.IEC selects the dedicated XI exception code. */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBXI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    case TLBRET_RI:
        /* Read-Inhibit Exception */
        if (env->CP0_PageGrain & (1 << CP0PG_IEC)) {
            exception = EXCP_TLBRI;
        } else {
            exception = EXCP_TLBL;
        }
        break;
    }
    /* Raise exception */
    /* BadVAddr is not updated while in debug mode. */
    if (!(env->hflags & MIPS_HFLAG_DM)) {
        env->CP0_BadVAddr = address;
    }
    /* Context.BadVPN2 <- address bits 31..13 (field at bits 22..4). */
    env->CP0_Context = (env->CP0_Context & ~0x007fffff) |
                       ((address >> 9) & 0x007ffff0);
    /* EntryHi keeps its ASID and EHINV bits; VPN2 comes from the fault. */
    env->CP0_EntryHi = (env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask) |
                       (env->CP0_EntryHi & (1 << CP0EnHi_EHINV)) |
                       (address & (TARGET_PAGE_MASK << 1));
#if defined(TARGET_MIPS64)
    env->CP0_EntryHi &= env->SEGMask;
    env->CP0_XContext =
        /* PTEBase */   (env->CP0_XContext & ((~0ULL) << (env->SEGBITS - 7))) |
        /* R */         (extract64(address, 62, 2) << (env->SEGBITS - 9)) |
        /* BadVPN2 */   (extract64(address, 13, env->SEGBITS - 13) << 4);
#endif
    cs->exception_index = exception;
    env->error_code = error_code;
}
525 | |
526 | #if !defined(CONFIG_USER_ONLY) |
527 | hwaddr mips_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) |
528 | { |
529 | MIPSCPU *cpu = MIPS_CPU(cs); |
530 | CPUMIPSState *env = &cpu->env; |
531 | hwaddr phys_addr; |
532 | int prot; |
533 | |
534 | if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT, |
535 | cpu_mmu_index(env, false)) != 0) { |
536 | return -1; |
537 | } |
538 | return phys_addr; |
539 | } |
540 | #endif |
541 | |
542 | #if !defined(CONFIG_USER_ONLY) |
543 | #if !defined(TARGET_MIPS64) |
544 | |
545 | /* |
546 | * Perform hardware page table walk |
547 | * |
548 | * Memory accesses are performed using the KERNEL privilege level. |
549 | * Synchronous exceptions detected on memory accesses cause a silent exit |
550 | * from page table walking, resulting in a TLB or XTLB Refill exception. |
551 | * |
552 | * Implementations are not required to support page table walk memory |
553 | * accesses from mapped memory regions. When an unsupported access is |
554 | * attempted, a silent exit is taken, resulting in a TLB or XTLB Refill |
555 | * exception. |
556 | * |
557 | * Note that if an exception is caused by AddressTranslation or LoadMemory |
558 | * functions, the exception is not taken, a silent exit is taken, |
559 | * resulting in a TLB or XTLB Refill exception. |
560 | */ |
561 | |
562 | static bool get_pte(CPUMIPSState *env, uint64_t vaddr, int entry_size, |
563 | uint64_t *pte) |
564 | { |
565 | if ((vaddr & ((entry_size >> 3) - 1)) != 0) { |
566 | return false; |
567 | } |
568 | if (entry_size == 64) { |
569 | *pte = cpu_ldq_code(env, vaddr); |
570 | } else { |
571 | *pte = cpu_ldl_code(env, vaddr); |
572 | } |
573 | return true; |
574 | } |
575 | |
576 | static uint64_t get_tlb_entry_layout(CPUMIPSState *env, uint64_t entry, |
577 | int entry_size, int ptei) |
578 | { |
579 | uint64_t result = entry; |
580 | uint64_t rixi; |
581 | if (ptei > entry_size) { |
582 | ptei -= 32; |
583 | } |
584 | result >>= (ptei - 2); |
585 | rixi = result & 3; |
586 | result >>= 2; |
587 | result |= rixi << CP0EnLo_XI; |
588 | return result; |
589 | } |
590 | |
/*
 * Walk one directory level of the hardware page table walk.  On entry
 * *vaddr is the virtual address of the directory entry to read; for a
 * non-leaf entry it is updated to the next-level pointer.
 *
 * Returns:
 *   0 - abort the walk silently (translation error, misaligned read,
 *       or an unsupported huge-page configuration)
 *   1 - huge (leaf) page found; *pw_entrylo0/*pw_entrylo1 filled in
 *   2 - regular directory entry; *vaddr now points to the next level
 */
static int walk_directory(CPUMIPSState *env, uint64_t *vaddr,
        int directory_index, bool *huge_page, bool *hgpg_directory_hit,
        uint64_t *pw_entrylo0, uint64_t *pw_entrylo1)
{
    int dph = (env->CP0_PWCtl >> CP0PC_DPH) & 0x1;
    int psn = (env->CP0_PWCtl >> CP0PC_PSN) & 0x3F;
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;
    /* Native pointer size: shift 2 (4 bytes) or 3 (8 bytes). */
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;
    uint32_t direntry_size = 1 << (directory_shift + 3);
    uint32_t leafentry_size = 1 << (leaf_shift + 3);
    uint64_t entry;
    uint64_t paddr;
    int prot;
    uint64_t lsb = 0;
    uint64_t w = 0;

    if (get_physical_address(env, &paddr, &prot, *vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        /* wrong base address */
        return 0;
    }
    if (!get_pte(env, *vaddr, direntry_size, &entry)) {
        return 0;
    }

    if ((entry & (1 << psn)) && hugepg) {
        /* The PSN bit marks this entry as a huge-page leaf. */
        *huge_page = true;
        *hgpg_directory_hit = true;
        entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
        w = directory_index - 1;
        if (directory_index & 0x1) {
            /* Generate adjacent page from same PTE for odd TLB page */
            /* NOTE(review): "1 << w" is an int shift; if w can reach 31
               or more this overflows — confirm PWField field limits. */
            lsb = (1 << w) >> 6;
            *pw_entrylo0 = entry & ~lsb;        /* even page */
            *pw_entrylo1 = entry | lsb;         /* odd page */
        } else if (dph) {
            /* Dual huge pages: also fetch the partner (odd/even) entry. */
            int oddpagebit = 1 << leaf_shift;
            uint64_t vaddr2 = *vaddr ^ oddpagebit;
            if (*vaddr & oddpagebit) {
                *pw_entrylo1 = entry;
            } else {
                *pw_entrylo0 = entry;
            }
            if (get_physical_address(env, &paddr, &prot, vaddr2, MMU_DATA_LOAD,
                                     ACCESS_INT, cpu_mmu_index(env, false)) !=
                                     TLBRET_MATCH) {
                return 0;
            }
            if (!get_pte(env, vaddr2, leafentry_size, &entry)) {
                return 0;
            }
            entry = get_tlb_entry_layout(env, entry, leafentry_size, pf_ptew);
            if (*vaddr & oddpagebit) {
                *pw_entrylo0 = entry;
            } else {
                *pw_entrylo1 = entry;
            }
        } else {
            /* Huge page found but dual-page mode is disabled: give up. */
            return 0;
        }
        return 1;
    } else {
        /* Non-leaf entry: follow the pointer to the next level. */
        *vaddr = entry;
        return 2;
    }
}
664 | |
/*
 * Hardware page table walker (driven by CP0 PWBase/PWField/PWSize/PWCtl).
 * Attempts to refill the TLB for the faulting 'address'.  Returns true
 * if an entry pair was written (via r4k_helper_tlbwr); false on any
 * condition that requires the ordinary software refill exception.
 */
static bool page_table_walk_refill(CPUMIPSState *env, vaddr address, int rw,
                                   int mmu_idx)
{
    /* Directory / leaf level index widths, in bits. */
    int gdw = (env->CP0_PWSize >> CP0PS_GDW) & 0x3F;
    int udw = (env->CP0_PWSize >> CP0PS_UDW) & 0x3F;
    int mdw = (env->CP0_PWSize >> CP0PS_MDW) & 0x3F;
    int ptw = (env->CP0_PWSize >> CP0PS_PTW) & 0x3F;
    int ptew = (env->CP0_PWSize >> CP0PS_PTEW) & 0x3F;

    /* Initial values */
    bool huge_page = false;
    bool hgpg_bdhit = false;
    bool hgpg_gdhit = false;
    bool hgpg_udhit = false;
    bool hgpg_mdhit = false;

    int32_t pw_pagemask = 0;
    target_ulong pw_entryhi = 0;
    uint64_t pw_entrylo0 = 0;
    uint64_t pw_entrylo1 = 0;

    /* Native pointer size */
    /*For the 32-bit architectures, this bit is fixed to 0.*/
    int native_shift = (((env->CP0_PWSize >> CP0PS_PS) & 1) == 0) ? 2 : 3;

    /* Indices from PWField */
    int pf_gdw = (env->CP0_PWField >> CP0PF_GDW) & 0x3F;
    int pf_udw = (env->CP0_PWField >> CP0PF_UDW) & 0x3F;
    int pf_mdw = (env->CP0_PWField >> CP0PF_MDW) & 0x3F;
    int pf_ptw = (env->CP0_PWField >> CP0PF_PTW) & 0x3F;
    int pf_ptew = (env->CP0_PWField >> CP0PF_PTEW) & 0x3F;

    /* Indices computed from faulting address */
    int gindex = (address >> pf_gdw) & ((1 << gdw) - 1);
    int uindex = (address >> pf_udw) & ((1 << udw) - 1);
    int mindex = (address >> pf_mdw) & ((1 << mdw) - 1);
    int ptindex = (address >> pf_ptw) & ((1 << ptw) - 1);

    /* Other HTW configs */
    int hugepg = (env->CP0_PWCtl >> CP0PC_HUGEPG) & 0x1;

    /* HTW Shift values (depend on entry size) */
    /* A shift of -1 flags an unsupported PTEW configuration. */
    int directory_shift = (ptew > 1) ? -1 :
            (hugepg && (ptew == 1)) ? native_shift + 1 : native_shift;
    int leaf_shift = (ptew > 1) ? -1 :
            (ptew == 1) ? native_shift + 1 : native_shift;

    /* Offsets into tables */
    int goffset = gindex << directory_shift;
    int uoffset = uindex << directory_shift;
    int moffset = mindex << directory_shift;
    int ptoffset0 = (ptindex >> 1) << (leaf_shift + 1);
    int ptoffset1 = ptoffset0 | (1 << (leaf_shift));

    uint32_t leafentry_size = 1 << (leaf_shift + 3);

    /* Starting address - Page Table Base */
    uint64_t vaddr = env->CP0_PWBase;

    uint64_t dir_entry;
    uint64_t paddr;
    int prot;
    int m;

    if (!(env->CP0_Config3 & (1 << CP0C3_PW))) {
        /* walker is unimplemented */
        return false;
    }
    if (!(env->CP0_PWCtl & (1 << CP0PC_PWEN))) {
        /* walker is disabled */
        return false;
    }
    if (!(gdw > 0 || udw > 0 || mdw > 0)) {
        /* no structure to walk */
        return false;
    }
    if ((directory_shift == -1) || (leaf_shift == -1)) {
        return false;
    }

    /* Global Directory */
    if (gdw > 0) {
        vaddr |= goffset;
        switch (walk_directory(env, &vaddr, pf_gdw, &huge_page, &hgpg_gdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Upper directory */
    if (udw > 0) {
        vaddr |= uoffset;
        switch (walk_directory(env, &vaddr, pf_udw, &huge_page, &hgpg_udhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Middle directory */
    if (mdw > 0) {
        vaddr |= moffset;
        switch (walk_directory(env, &vaddr, pf_mdw, &huge_page, &hgpg_mdhit,
                               &pw_entrylo0, &pw_entrylo1))
        {
        case 0:
            return false;
        case 1:
            goto refill;
        case 2:
        default:
            break;
        }
    }

    /* Leaf Level Page Table - First half of PTE pair */
    vaddr |= ptoffset0;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo0 = dir_entry;

    /* Leaf Level Page Table - Second half of PTE pair */
    vaddr |= ptoffset1;
    if (get_physical_address(env, &paddr, &prot, vaddr, MMU_DATA_LOAD,
                             ACCESS_INT, cpu_mmu_index(env, false)) !=
                             TLBRET_MATCH) {
        return false;
    }
    if (!get_pte(env, vaddr, leafentry_size, &dir_entry)) {
        return false;
    }
    dir_entry = get_tlb_entry_layout(env, dir_entry, leafentry_size, pf_ptew);
    pw_entrylo1 = dir_entry;

refill:
    /* Page mask: defaults to the leaf page size, widened on a huge-page
       hit to the size covered by the hitting directory level. */
    m = (1 << pf_ptw) - 1;

    if (huge_page) {
        switch (hgpg_bdhit << 3 | hgpg_gdhit << 2 | hgpg_udhit << 1 |
                hgpg_mdhit)
        {
        case 4:
            m = (1 << pf_gdw) - 1;
            if (pf_gdw & 1) {
                m >>= 1;
            }
            break;
        case 2:
            m = (1 << pf_udw) - 1;
            if (pf_udw & 1) {
                m >>= 1;
            }
            break;
        case 1:
            m = (1 << pf_mdw) - 1;
            if (pf_mdw & 1) {
                m >>= 1;
            }
            break;
        }
    }
    pw_pagemask = m >> 12;
    update_pagemask(env, pw_pagemask << 13, &pw_pagemask);
    pw_entryhi = (address & ~0x1fff) | (env->CP0_EntryHi & 0xFF);
    {
        /* Save the live CP0 TLB registers, substitute the walked entry,
           write it with TLBWR semantics, then restore them. */
        target_ulong tmp_entryhi = env->CP0_EntryHi;
        int32_t tmp_pagemask = env->CP0_PageMask;
        uint64_t tmp_entrylo0 = env->CP0_EntryLo0;
        uint64_t tmp_entrylo1 = env->CP0_EntryLo1;

        env->CP0_EntryHi = pw_entryhi;
        env->CP0_PageMask = pw_pagemask;
        env->CP0_EntryLo0 = pw_entrylo0;
        env->CP0_EntryLo1 = pw_entrylo1;

        /*
         * The hardware page walker inserts a page into the TLB in a manner
         * identical to a TLBWR instruction as executed by the software refill
         * handler.
         */
        r4k_helper_tlbwr(env);

        env->CP0_EntryHi = tmp_entryhi;
        env->CP0_PageMask = tmp_pagemask;
        env->CP0_EntryLo0 = tmp_entrylo0;
        env->CP0_EntryLo1 = tmp_entrylo1;
    }
    return true;
}
875 | #endif |
876 | #endif |
877 | |
/*
 * TCG tlb_fill hook: translate 'address' for the given access type and
 * mmu_idx and install the mapping in QEMU's TLB on success.  On a TLB
 * miss, try the hardware page table walker (32-bit targets only), and
 * finally raise the architectural MMU exception unless probing.
 */
bool mips_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
    hwaddr physical;
    int prot;
    int mips_access_type;
#endif
    int ret = TLBRET_BADADDR;

    /* data access */
#if !defined(CONFIG_USER_ONLY)
    /* XXX: put correct access by using cpu_restore_state() correctly */
    mips_access_type = ACCESS_INT;
    ret = get_physical_address(env, &physical, &prot, address,
                               access_type, mips_access_type, mmu_idx);
    /* Log the outcome of the translation. */
    switch (ret) {
    case TLBRET_MATCH:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
                      " prot %d\n", __func__, address, physical, prot);
        break;
    default:
        qemu_log_mask(CPU_LOG_MMU,
                      "%s address=%" VADDR_PRIx " ret %d\n", __func__, address,
                      ret);
        break;
    }
    if (ret == TLBRET_MATCH) {
        tlb_set_page(cs, address & TARGET_PAGE_MASK,
                     physical & TARGET_PAGE_MASK, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
#if !defined(TARGET_MIPS64)
    if ((ret == TLBRET_NOMATCH) && (env->tlb->nb_tlb > 1)) {
        /*
         * Memory reads during hardware page table walking are performed
         * as if they were kernel-mode load instructions.
         */
        int mode = (env->hflags & MIPS_HFLAG_KSU);
        bool ret_walker;
        env->hflags &= ~MIPS_HFLAG_KSU;
        ret_walker = page_table_walk_refill(env, address, access_type, mmu_idx);
        env->hflags |= mode;
        if (ret_walker) {
            /* The walker refilled the TLB: retry the translation. */
            ret = get_physical_address(env, &physical, &prot, address,
                                       access_type, mips_access_type, mmu_idx);
            if (ret == TLBRET_MATCH) {
                tlb_set_page(cs, address & TARGET_PAGE_MASK,
                             physical & TARGET_PAGE_MASK, prot,
                             mmu_idx, TARGET_PAGE_SIZE);
                return true;
            }
        }
    }
#endif
    if (probe) {
        /* Probe-only request: report failure, raise nothing. */
        return false;
    }
#endif

    raise_mmu_exception(env, address, access_type, ret);
    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
}
946 | |
947 | #ifndef CONFIG_USER_ONLY |
948 | hwaddr cpu_mips_translate_address(CPUMIPSState *env, target_ulong address, int rw) |
949 | { |
950 | hwaddr physical; |
951 | int prot; |
952 | int access_type; |
953 | int ret = 0; |
954 | |
955 | /* data access */ |
956 | access_type = ACCESS_INT; |
957 | ret = get_physical_address(env, &physical, &prot, address, rw, access_type, |
958 | cpu_mmu_index(env, false)); |
959 | if (ret != TLBRET_MATCH) { |
960 | raise_mmu_exception(env, address, rw, ret); |
961 | return -1LL; |
962 | } else { |
963 | return physical; |
964 | } |
965 | } |
966 | |
/*
 * Human-readable names for the EXCP_* exception codes, indexed by code.
 * Used for CPU_LOG_INT logging in mips_cpu_do_interrupt().
 */
static const char * const excp_names[EXCP_LAST + 1] = {
    [EXCP_RESET] = "reset" ,
    [EXCP_SRESET] = "soft reset" ,
    [EXCP_DSS] = "debug single step" ,
    [EXCP_DINT] = "debug interrupt" ,
    [EXCP_NMI] = "non-maskable interrupt" ,
    [EXCP_MCHECK] = "machine check" ,
    [EXCP_EXT_INTERRUPT] = "interrupt" ,
    [EXCP_DFWATCH] = "deferred watchpoint" ,
    [EXCP_DIB] = "debug instruction breakpoint" ,
    [EXCP_IWATCH] = "instruction fetch watchpoint" ,
    [EXCP_AdEL] = "address error load" ,
    [EXCP_AdES] = "address error store" ,
    [EXCP_TLBF] = "TLB refill" ,
    [EXCP_IBE] = "instruction bus error" ,
    [EXCP_DBp] = "debug breakpoint" ,
    [EXCP_SYSCALL] = "syscall" ,
    [EXCP_BREAK] = "break" ,
    [EXCP_CpU] = "coprocessor unusable" ,
    [EXCP_RI] = "reserved instruction" ,
    [EXCP_OVERFLOW] = "arithmetic overflow" ,
    [EXCP_TRAP] = "trap" ,
    [EXCP_FPE] = "floating point" ,
    [EXCP_DDBS] = "debug data break store" ,
    [EXCP_DWATCH] = "data watchpoint" ,
    [EXCP_LTLBL] = "TLB modify" ,
    [EXCP_TLBL] = "TLB load" ,
    [EXCP_TLBS] = "TLB store" ,
    [EXCP_DBE] = "data bus error" ,
    [EXCP_DDBL] = "debug data break load" ,
    [EXCP_THREAD] = "thread" ,
    [EXCP_MDMX] = "MDMX" ,
    [EXCP_C2E] = "precise coprocessor 2" ,
    [EXCP_CACHE] = "cache error" ,
    [EXCP_TLBXI] = "TLB execute-inhibit" ,
    [EXCP_TLBRI] = "TLB read-inhibit" ,
    [EXCP_MSADIS] = "MSA disabled" ,
    [EXCP_MSAFPE] = "MSA floating point" ,
};
1006 | #endif |
1007 | |
1008 | target_ulong exception_resume_pc (CPUMIPSState *env) |
1009 | { |
1010 | target_ulong bad_pc; |
1011 | target_ulong isa_mode; |
1012 | |
1013 | isa_mode = !!(env->hflags & MIPS_HFLAG_M16); |
1014 | bad_pc = env->active_tc.PC | isa_mode; |
1015 | if (env->hflags & MIPS_HFLAG_BMASK) { |
1016 | /* If the exception was raised from a delay slot, come back to |
1017 | the jump. */ |
1018 | bad_pc -= (env->hflags & MIPS_HFLAG_B16 ? 2 : 4); |
1019 | } |
1020 | |
1021 | return bad_pc; |
1022 | } |
1023 | |
1024 | #if !defined(CONFIG_USER_ONLY) |
1025 | static void set_hflags_for_handler (CPUMIPSState *env) |
1026 | { |
1027 | /* Exception handlers are entered in 32-bit mode. */ |
1028 | env->hflags &= ~(MIPS_HFLAG_M16); |
1029 | /* ...except that microMIPS lets you choose. */ |
1030 | if (env->insn_flags & ASE_MICROMIPS) { |
1031 | env->hflags |= (!!(env->CP0_Config3 |
1032 | & (1 << CP0C3_ISA_ON_EXC)) |
1033 | << MIPS_HFLAG_M16_SHIFT); |
1034 | } |
1035 | } |
1036 | |
1037 | static inline void set_badinstr_registers(CPUMIPSState *env) |
1038 | { |
1039 | if (env->insn_flags & ISA_NANOMIPS32) { |
1040 | if (env->CP0_Config3 & (1 << CP0C3_BI)) { |
1041 | uint32_t instr = (cpu_lduw_code(env, env->active_tc.PC)) << 16; |
1042 | if ((instr & 0x10000000) == 0) { |
1043 | instr |= cpu_lduw_code(env, env->active_tc.PC + 2); |
1044 | } |
1045 | env->CP0_BadInstr = instr; |
1046 | |
1047 | if ((instr & 0xFC000000) == 0x60000000) { |
1048 | instr = cpu_lduw_code(env, env->active_tc.PC + 4) << 16; |
1049 | env->CP0_BadInstrX = instr; |
1050 | } |
1051 | } |
1052 | return; |
1053 | } |
1054 | |
1055 | if (env->hflags & MIPS_HFLAG_M16) { |
1056 | /* TODO: add BadInstr support for microMIPS */ |
1057 | return; |
1058 | } |
1059 | if (env->CP0_Config3 & (1 << CP0C3_BI)) { |
1060 | env->CP0_BadInstr = cpu_ldl_code(env, env->active_tc.PC); |
1061 | } |
1062 | if ((env->CP0_Config3 & (1 << CP0C3_BP)) && |
1063 | (env->hflags & MIPS_HFLAG_BMASK)) { |
1064 | env->CP0_BadInstrP = cpu_ldl_code(env, env->active_tc.PC - 4); |
1065 | } |
1066 | } |
1067 | #endif |
1068 | |
/*
 * Deliver the exception pending in cs->exception_index: record the
 * resume PC, update the CP0 state (Cause/Status/EPC, or Debug/DEPC for
 * EJTAG debug exceptions), and redirect execution to the appropriate
 * exception vector.  No-op for user-only builds apart from clearing
 * the pending exception.
 */
void mips_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
    MIPSCPU *cpu = MIPS_CPU(cs);
    CPUMIPSState *env = &cpu->env;
    /* When set, the faulting instruction is latched into BadInstr*. */
    bool update_badinstr = 0;
    /* Handler offset from the selected vector base. */
    target_ulong offset;
    /* Cause.ExcCode value; stays -1 for exceptions that do not set it. */
    int cause = -1;
    const char *name;

    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
            name = "unknown" ;
        } else {
            name = excp_names[cs->exception_index];
        }

        qemu_log("%s enter: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx
                 " %s exception\n" ,
                 __func__, env->active_tc.PC, env->CP0_EPC, name);
    }
    /* In debug mode, an external interrupt is taken as a debug interrupt. */
    if (cs->exception_index == EXCP_EXT_INTERRUPT &&
        (env->hflags & MIPS_HFLAG_DM)) {
        cs->exception_index = EXCP_DINT;
    }
    /* Default: the general exception vector offset. */
    offset = 0x180;
    switch (cs->exception_index) {
    /* --- EJTAG debug exceptions: enter debug mode via DEPC --- */
    case EXCP_DSS:
        env->CP0_Debug |= 1 << CP0DB_DSS;
        /* Debug single step cannot be raised inside a delay slot and
           resume will always occur on the next instruction
           (but we assume the pc has always been updated during
           code translation). */
        env->CP0_DEPC = env->active_tc.PC | !!(env->hflags & MIPS_HFLAG_M16);
        goto enter_debug_mode;
    case EXCP_DINT:
        env->CP0_Debug |= 1 << CP0DB_DINT;
        goto set_DEPC;
    case EXCP_DIB:
        env->CP0_Debug |= 1 << CP0DB_DIB;
        goto set_DEPC;
    case EXCP_DBp:
        env->CP0_Debug |= 1 << CP0DB_DBp;
        /* Setup DExcCode - SDBBP instruction */
        env->CP0_Debug = (env->CP0_Debug & ~(0x1fULL << CP0DB_DEC)) | 9 << CP0DB_DEC;
        goto set_DEPC;
    case EXCP_DDBS:
        env->CP0_Debug |= 1 << CP0DB_DDBS;
        goto set_DEPC;
    case EXCP_DDBL:
        env->CP0_Debug |= 1 << CP0DB_DDBL;
    set_DEPC:
        env->CP0_DEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
 enter_debug_mode:
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        /* Debug mode runs with kernel privileges and CP0 access. */
        env->hflags |= MIPS_HFLAG_DM | MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        /* EJTAG probe trap enable is not implemented... */
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        env->active_tc.PC = env->exception_base + 0x480;
        set_hflags_for_handler(env);
        break;
    /* --- Reset / soft reset / NMI: enter through ErrorEPC --- */
    case EXCP_RESET:
        cpu_reset(CPU(cpu));
        break;
    case EXCP_SRESET:
        env->CP0_Status |= (1 << CP0St_SR);
        memset(env->CP0_WatchLo, 0, sizeof(env->CP0_WatchLo));
        goto set_error_EPC;
    case EXCP_NMI:
        env->CP0_Status |= (1 << CP0St_NMI);
 set_error_EPC:
        env->CP0_ErrorEPC = exception_resume_pc(env);
        env->hflags &= ~MIPS_HFLAG_BMASK;
        env->CP0_Status |= (1 << CP0St_ERL) | (1 << CP0St_BEV);
        if (env->insn_flags & ISA_MIPS3) {
            env->hflags |= MIPS_HFLAG_64;
            if (!(env->insn_flags & ISA_MIPS64R6) ||
                env->CP0_Status & (1 << CP0St_KX)) {
                env->hflags &= ~MIPS_HFLAG_AWRAP;
            }
        }
        env->hflags |= MIPS_HFLAG_CP0;
        env->hflags &= ~(MIPS_HFLAG_KSU);
        if (!(env->CP0_Status & (1 << CP0St_EXL)))
            env->CP0_Cause &= ~(1U << CP0Ca_BD);
        env->active_tc.PC = env->exception_base;
        set_hflags_for_handler(env);
        break;
    /* --- Ordinary exceptions: enter through EPC --- */
    case EXCP_EXT_INTERRUPT:
        cause = 0;
        /* With Cause.IV set, interrupts use the special/vectored vector. */
        if (env->CP0_Cause & (1 << CP0Ca_IV)) {
            uint32_t spacing = (env->CP0_IntCtl >> CP0IntCtl_VS) & 0x1f;

            if ((env->CP0_Status & (1 << CP0St_BEV)) || spacing == 0) {
                offset = 0x200;
            } else {
                uint32_t vector = 0;
                uint32_t pending = (env->CP0_Cause & CP0Ca_IP_mask) >> CP0Ca_IP;

                if (env->CP0_Config3 & (1 << CP0C3_VEIC)) {
                    /* For VEIC mode, the external interrupt controller feeds
                     * the vector through the CP0Cause IP lines. */
                    vector = pending;
                } else {
                    /* Vectored Interrupts
                     * Mask with Status.IM7-IM0 to get enabled interrupts. */
                    pending &= (env->CP0_Status >> CP0St_IM) & 0xff;
                    /* Find the highest-priority interrupt. */
                    while (pending >>= 1) {
                        vector++;
                    }
                }
                offset = 0x200 + (vector * (spacing << 5));
            }
        }
        goto set_EPC;
    case EXCP_LTLBL:
        cause = 1;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_TLBL:
        cause = 2;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        /* A TLB refill (no matching entry) taken outside another
           exception (EXL clear) uses the refill vector at offset 0;
           on 64-bit targets, 64-bit-segment accesses use the XTLB
           refill vector at 0x080 instead. */
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_TLBS:
        cause = 3;
        update_badinstr = 1;
        /* Same refill-vector selection as the TLB load case above. */
        if ((env->error_code & EXCP_TLB_NOMATCH) &&
            !(env->CP0_Status & (1 << CP0St_EXL))) {
#if defined(TARGET_MIPS64)
            int R = env->CP0_BadVAddr >> 62;
            int UX = (env->CP0_Status & (1 << CP0St_UX)) != 0;
            int KX = (env->CP0_Status & (1 << CP0St_KX)) != 0;

            if ((R != 0 || UX) && (R != 3 || KX) &&
                (!(env->insn_flags & (INSN_LOONGSON2E | INSN_LOONGSON2F)))) {
                offset = 0x080;
            } else {
#endif
                offset = 0x000;
#if defined(TARGET_MIPS64)
            }
#endif
        }
        goto set_EPC;
    case EXCP_AdEL:
        cause = 4;
        update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
        goto set_EPC;
    case EXCP_AdES:
        cause = 5;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_IBE:
        cause = 6;
        goto set_EPC;
    case EXCP_DBE:
        cause = 7;
        goto set_EPC;
    case EXCP_SYSCALL:
        cause = 8;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_BREAK:
        cause = 9;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_RI:
        cause = 10;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_CpU:
        cause = 11;
        update_badinstr = 1;
        /* Record the offending coprocessor number in Cause.CE. */
        env->CP0_Cause = (env->CP0_Cause & ~(0x3 << CP0Ca_CE)) |
                         (env->error_code << CP0Ca_CE);
        goto set_EPC;
    case EXCP_OVERFLOW:
        cause = 12;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TRAP:
        cause = 13;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MSAFPE:
        cause = 14;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_FPE:
        cause = 15;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_C2E:
        cause = 18;
        goto set_EPC;
    case EXCP_TLBRI:
        cause = 19;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_TLBXI:
        cause = 20;
        goto set_EPC;
    case EXCP_MSADIS:
        cause = 21;
        update_badinstr = 1;
        goto set_EPC;
    case EXCP_MDMX:
        cause = 22;
        goto set_EPC;
    case EXCP_DWATCH:
        cause = 23;
        /* XXX: TODO: manage deferred watch exceptions */
        goto set_EPC;
    case EXCP_MCHECK:
        cause = 24;
        goto set_EPC;
    case EXCP_THREAD:
        cause = 25;
        goto set_EPC;
    case EXCP_DSPDIS:
        cause = 26;
        goto set_EPC;
    case EXCP_CACHE:
        cause = 30;
        offset = 0x100;
 set_EPC:
        /* Common EPC/Status/Cause update; skipped when nested (EXL set),
           in which case EPC and Cause.BD are preserved. */
        if (!(env->CP0_Status & (1 << CP0St_EXL))) {
            env->CP0_EPC = exception_resume_pc(env);
            if (update_badinstr) {
                set_badinstr_registers(env);
            }
            if (env->hflags & MIPS_HFLAG_BMASK) {
                env->CP0_Cause |= (1U << CP0Ca_BD);
            } else {
                env->CP0_Cause &= ~(1U << CP0Ca_BD);
            }
            env->CP0_Status |= (1 << CP0St_EXL);
            if (env->insn_flags & ISA_MIPS3) {
                env->hflags |= MIPS_HFLAG_64;
                if (!(env->insn_flags & ISA_MIPS64R6) ||
                    env->CP0_Status & (1 << CP0St_KX)) {
                    env->hflags &= ~MIPS_HFLAG_AWRAP;
                }
            }
            env->hflags |= MIPS_HFLAG_CP0;
            env->hflags &= ~(MIPS_HFLAG_KSU);
        }
        env->hflags &= ~MIPS_HFLAG_BMASK;
        /* Select the vector base: bootstrap, KSeg1 for cache errors,
           or EBase. */
        if (env->CP0_Status & (1 << CP0St_BEV)) {
            env->active_tc.PC = env->exception_base + 0x200;
        } else if (cause == 30 && !(env->CP0_Config3 & (1 << CP0C3_SC) &&
                                    env->CP0_Config5 & (1 << CP0C5_CV))) {
            /* Force KSeg1 for cache errors */
            env->active_tc.PC = KSEG1_BASE | (env->CP0_EBase & 0x1FFFF000);
        } else {
            env->active_tc.PC = env->CP0_EBase & ~0xfff;
        }

        env->active_tc.PC += offset;
        set_hflags_for_handler(env);
        env->CP0_Cause = (env->CP0_Cause & ~(0x1f << CP0Ca_EC)) | (cause << CP0Ca_EC);
        break;
    default:
        abort();
    }
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && cs->exception_index != EXCP_EXT_INTERRUPT) {
        qemu_log("%s: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx " cause %d\n"
                 "    S %08x C %08x A " TARGET_FMT_lx " D " TARGET_FMT_lx "\n" ,
                 __func__, env->active_tc.PC, env->CP0_EPC, cause,
                 env->CP0_Status, env->CP0_Cause, env->CP0_BadVAddr,
                 env->CP0_DEPC);
    }
#endif
    cs->exception_index = EXCP_NONE;
}
1373 | |
1374 | bool mips_cpu_exec_interrupt(CPUState *cs, int interrupt_request) |
1375 | { |
1376 | if (interrupt_request & CPU_INTERRUPT_HARD) { |
1377 | MIPSCPU *cpu = MIPS_CPU(cs); |
1378 | CPUMIPSState *env = &cpu->env; |
1379 | |
1380 | if (cpu_mips_hw_interrupts_enabled(env) && |
1381 | cpu_mips_hw_interrupts_pending(env)) { |
1382 | /* Raise it */ |
1383 | cs->exception_index = EXCP_EXT_INTERRUPT; |
1384 | env->error_code = 0; |
1385 | mips_cpu_do_interrupt(cs); |
1386 | return true; |
1387 | } |
1388 | } |
1389 | return false; |
1390 | } |
1391 | |
1392 | #if !defined(CONFIG_USER_ONLY) |
/*
 * Flush from the QEMU softmmu TLB all pages mapped by guest TLB entry
 * @idx, or, when @use_extra is set (tlbwr path), preserve the discarded
 * entry in a hidden shadow slot instead of flushing.
 */
void r4k_invalidate_tlb (CPUMIPSState *env, int idx, int use_extra)
{
    CPUState *cs = env_cpu(env);
    r4k_tlb_t *tlb;
    target_ulong addr;
    target_ulong end;
    uint16_t ASID = env->CP0_EntryHi & env->CP0_EntryHi_ASID_mask;
    target_ulong mask;

    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* The qemu TLB is flushed when the ASID changes, so no need to
       flush these entries again. */
    if (tlb->G == 0 && tlb->ASID != ASID) {
        return;
    }

    if (use_extra && env->tlb->tlb_in_use < MIPS_TLB_MAX) {
        /* For tlbwr, we can shadow the discarded entry into
           a new (fake) TLB entry, as long as the guest can not
           tell that it's there. */
        env->tlb->mmu.r4k.tlb[env->tlb->tlb_in_use] = *tlb;
        env->tlb->tlb_in_use++;
        return;
    }

    /* 1k pages are not supported. */
    mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
    if (tlb->V0) {
        /* Even page of the VPN pair. */
        addr = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        /* Restore the sign-extended 64-bit form of 32-bit kernel
           segment addresses. */
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | (mask >> 1);
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
    if (tlb->V1) {
        /* Odd page of the VPN pair. */
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
#if defined(TARGET_MIPS64)
        if (addr >= (0xFFFFFFFF80000000ULL & env->SEGMask)) {
            addr |= 0x3FFFFF0000000000ULL;
        }
#endif
        end = addr | mask;
        /* NOTE(review): "addr - 1 < end" looks deliberate — it appears
           to terminate correctly even when `end` is the last page of
           the address space and `addr` would wrap to 0; confirm before
           simplifying to "addr <= end". */
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
1447 | #endif |
1448 | |
1449 | void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env, |
1450 | uint32_t exception, |
1451 | int error_code, |
1452 | uintptr_t pc) |
1453 | { |
1454 | CPUState *cs = env_cpu(env); |
1455 | |
1456 | qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n" , |
1457 | __func__, exception, error_code); |
1458 | cs->exception_index = exception; |
1459 | env->error_code = error_code; |
1460 | |
1461 | cpu_loop_exit_restore(cs, pc); |
1462 | } |
1463 | |
1464 | static void mips_cpu_add_definition(gpointer data, gpointer user_data) |
1465 | { |
1466 | ObjectClass *oc = data; |
1467 | CpuDefinitionInfoList **cpu_list = user_data; |
1468 | CpuDefinitionInfoList *entry; |
1469 | CpuDefinitionInfo *info; |
1470 | const char *typename; |
1471 | |
1472 | typename = object_class_get_name(oc); |
1473 | info = g_malloc0(sizeof(*info)); |
1474 | info->name = g_strndup(typename, |
1475 | strlen(typename) - strlen("-" TYPE_MIPS_CPU)); |
1476 | info->q_typename = g_strdup(typename); |
1477 | |
1478 | entry = g_malloc0(sizeof(*entry)); |
1479 | entry->value = info; |
1480 | entry->next = *cpu_list; |
1481 | *cpu_list = entry; |
1482 | } |
1483 | |
1484 | CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) |
1485 | { |
1486 | CpuDefinitionInfoList *cpu_list = NULL; |
1487 | GSList *list; |
1488 | |
1489 | list = object_class_get_list(TYPE_MIPS_CPU, false); |
1490 | g_slist_foreach(list, mips_cpu_add_definition, &cpu_list); |
1491 | g_slist_free(list); |
1492 | |
1493 | return cpu_list; |
1494 | } |
1495 | |