1 | /* |
2 | * RISC-V CPU helpers for qemu. |
3 | * |
4 | * Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu |
5 | * Copyright (c) 2017-2018 SiFive, Inc. |
6 | * |
7 | * This program is free software; you can redistribute it and/or modify it |
8 | * under the terms and conditions of the GNU General Public License, |
9 | * version 2 or later, as published by the Free Software Foundation. |
10 | * |
11 | * This program is distributed in the hope it will be useful, but WITHOUT |
12 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for |
14 | * more details. |
15 | * |
16 | * You should have received a copy of the GNU General Public License along with |
17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
18 | */ |
19 | |
20 | #include "qemu/osdep.h" |
21 | #include "qemu/log.h" |
22 | #include "cpu.h" |
23 | #include "exec/exec-all.h" |
24 | #include "tcg-op.h" |
25 | #include "trace.h" |
26 | |
27 | int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch) |
28 | { |
29 | #ifdef CONFIG_USER_ONLY |
30 | return 0; |
31 | #else |
32 | return env->priv; |
33 | #endif |
34 | } |
35 | |
36 | #ifndef CONFIG_USER_ONLY |
37 | static int riscv_cpu_local_irq_pending(CPURISCVState *env) |
38 | { |
39 | target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE); |
40 | target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE); |
41 | target_ulong pending = atomic_read(&env->mip) & env->mie; |
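    /* Machine-level interrupts are takeable when running below M-mode, or
     * in M-mode with mstatus.MIE set; delegated interrupts likewise need
     * mstatus.SIE when in S-mode. mie/sie are 0 or 1, so unary minus
     * yields an all-zeroes or all-ones mask over the pending bits. */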
42 | target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie); |
43 | target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie); |
44 | target_ulong irqs = (pending & ~env->mideleg & -mie) | |
45 | (pending & env->mideleg & -sie); |
46 | |
47 | if (irqs) { |
48 | return ctz64(irqs); /* since non-zero */ |
49 | } else { |
50 | return EXCP_NONE; /* indicates no pending interrupt */ |
51 | } |
52 | } |
53 | #endif |
54 | |
55 | bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) |
56 | { |
57 | #if !defined(CONFIG_USER_ONLY) |
58 | if (interrupt_request & CPU_INTERRUPT_HARD) { |
59 | RISCVCPU *cpu = RISCV_CPU(cs); |
60 | CPURISCVState *env = &cpu->env; |
61 | int interruptno = riscv_cpu_local_irq_pending(env); |
62 | if (interruptno >= 0) { |
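            /* Tag the index with RISCV_EXCP_INT_FLAG (the MSB) so that
             * riscv_cpu_do_interrupt decodes this as an interrupt. */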
63 | cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno; |
64 | riscv_cpu_do_interrupt(cs); |
65 | return true; |
66 | } |
67 | } |
68 | #endif |
69 | return false; |
70 | } |
71 | |
72 | #if !defined(CONFIG_USER_ONLY) |
73 | |
74 | int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) |
75 | { |
76 | CPURISCVState *env = &cpu->env; |
77 | if (env->miclaim & interrupts) { |
78 | return -1; |
79 | } else { |
80 | env->miclaim |= interrupts; |
81 | return 0; |
82 | } |
83 | } |
84 | |
85 | struct CpuAsyncInfo { |
86 | uint32_t new_mip; |
87 | }; |
88 | |
89 | static void riscv_cpu_update_mip_irqs_async(CPUState *target_cpu_state, |
90 | run_on_cpu_data data) |
91 | { |
92 | struct CpuAsyncInfo *info = (struct CpuAsyncInfo *) data.host_ptr; |
93 | |
94 | if (info->new_mip) { |
95 | cpu_interrupt(target_cpu_state, CPU_INTERRUPT_HARD); |
96 | } else { |
97 | cpu_reset_interrupt(target_cpu_state, CPU_INTERRUPT_HARD); |
98 | } |
99 | |
100 | g_free(info); |
101 | } |
102 | |
103 | uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) |
104 | { |
105 | CPURISCVState *env = &cpu->env; |
106 | CPUState *cs = CPU(cpu); |
107 | struct CpuAsyncInfo *info; |
108 | uint32_t old, new, cmp = atomic_read(&env->mip); |
109 | |
110 | do { |
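        /* Retry until the compare-and-swap succeeds, i.e. no concurrent
         * writer touched mip between our read and our update. */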
111 | old = cmp; |
112 | new = (old & ~mask) | (value & mask); |
113 | cmp = atomic_cmpxchg(&env->mip, old, new); |
114 | } while (old != cmp); |
115 | |
116 | info = g_new(struct CpuAsyncInfo, 1); |
117 | info->new_mip = new; |
118 | |
119 | async_run_on_cpu(cs, riscv_cpu_update_mip_irqs_async, |
120 | RUN_ON_CPU_HOST_PTR(info)); |
121 | |
122 | return old; |
123 | } |
124 | |
125 | void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) |
126 | { |
127 | if (newpriv > PRV_M) { |
128 | g_assert_not_reached(); |
129 | } |
130 | if (newpriv == PRV_H) { |
131 | newpriv = PRV_U; |
132 | } |
133 | /* tlb_flush is unnecessary as mode is contained in mmu_idx */ |
134 | env->priv = newpriv; |
135 | |
136 | /* |
137 | * Clear the load reservation - otherwise a reservation placed in one |
138 | * context/process can be used by another, resulting in an SC succeeding |
139 | * incorrectly. Version 2.2 of the ISA specification explicitly requires |
140 | * this behaviour, while later revisions say that the kernel "should" use |
141 | * an SC instruction to force the yielding of a load reservation on a |
142 | * preemptive context switch. As a result, do both. |
143 | */ |
144 | env->load_res = -1; |
145 | } |
146 | |
/* get_physical_address - get the physical address for this virtual address
 *
 * Do a page table walk to obtain the physical address corresponding to a
 * virtual address. Returns TRANSLATE_SUCCESS (0) on success, and
 * TRANSLATE_FAIL or TRANSLATE_PMP_FAIL on failure.
 *
 * Adapted from Spike's mmu_t::translate and mmu_t::walk
 */
155 | static int get_physical_address(CPURISCVState *env, hwaddr *physical, |
156 | int *prot, target_ulong addr, |
157 | int access_type, int mmu_idx) |
158 | { |
159 | /* NOTE: the env->pc value visible here will not be |
160 | * correct, but the value visible to the exception handler |
161 | * (riscv_cpu_do_interrupt) is correct */ |
162 | |
163 | int mode = mmu_idx; |
164 | |
165 | if (mode == PRV_M && access_type != MMU_INST_FETCH) { |
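        /* MPRV redirects M-mode loads and stores (never instruction
         * fetches) to the translation and protection rules of the mode
         * held in mstatus.MPP. */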
166 | if (get_field(env->mstatus, MSTATUS_MPRV)) { |
167 | mode = get_field(env->mstatus, MSTATUS_MPP); |
168 | } |
169 | } |
170 | |
171 | if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { |
172 | *physical = addr; |
173 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
174 | return TRANSLATE_SUCCESS; |
175 | } |
176 | |
177 | *prot = 0; |
178 | |
179 | target_ulong base; |
180 | int levels, ptidxbits, ptesize, vm, sum; |
181 | int mxr = get_field(env->mstatus, MSTATUS_MXR); |
182 | |
183 | if (env->priv_ver >= PRIV_VERSION_1_10_0) { |
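        /* Priv spec 1.10 moved the root page-table PPN and translation
         * mode from sptbr/mstatus.VM into the satp CSR, and replaced
         * mstatus.PUM with mstatus.SUM. */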
184 | base = get_field(env->satp, SATP_PPN) << PGSHIFT; |
185 | sum = get_field(env->mstatus, MSTATUS_SUM); |
186 | vm = get_field(env->satp, SATP_MODE); |
187 | switch (vm) { |
188 | case VM_1_10_SV32: |
189 | levels = 2; ptidxbits = 10; ptesize = 4; break; |
190 | case VM_1_10_SV39: |
191 | levels = 3; ptidxbits = 9; ptesize = 8; break; |
192 | case VM_1_10_SV48: |
193 | levels = 4; ptidxbits = 9; ptesize = 8; break; |
194 | case VM_1_10_SV57: |
195 | levels = 5; ptidxbits = 9; ptesize = 8; break; |
196 | case VM_1_10_MBARE: |
197 | *physical = addr; |
198 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
199 | return TRANSLATE_SUCCESS; |
200 | default: |
201 | g_assert_not_reached(); |
202 | } |
203 | } else { |
204 | base = env->sptbr << PGSHIFT; |
205 | sum = !get_field(env->mstatus, MSTATUS_PUM); |
206 | vm = get_field(env->mstatus, MSTATUS_VM); |
207 | switch (vm) { |
208 | case VM_1_09_SV32: |
209 | levels = 2; ptidxbits = 10; ptesize = 4; break; |
210 | case VM_1_09_SV39: |
211 | levels = 3; ptidxbits = 9; ptesize = 8; break; |
212 | case VM_1_09_SV48: |
213 | levels = 4; ptidxbits = 9; ptesize = 8; break; |
214 | case VM_1_09_MBARE: |
215 | *physical = addr; |
216 | *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; |
217 | return TRANSLATE_SUCCESS; |
218 | default: |
219 | g_assert_not_reached(); |
220 | } |
221 | } |
222 | |
223 | CPUState *cs = env_cpu(env); |
224 | int va_bits = PGSHIFT + levels * ptidxbits; |
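    /* Virtual addresses must be a sign-extension of the most-significant
     * translated bit; reject non-canonical addresses before walking. */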
225 | target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1; |
226 | target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask; |
227 | if (masked_msbs != 0 && masked_msbs != mask) { |
228 | return TRANSLATE_FAIL; |
229 | } |
230 | |
231 | int ptshift = (levels - 1) * ptidxbits; |
232 | int i; |
233 | |
234 | #if !TCG_OVERSIZED_GUEST |
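    /* The A/D-bit update below may race with another hart walking the
     * same tables; if the PTE changes under us we restart the walk here. */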
235 | restart: |
236 | #endif |
237 | for (i = 0; i < levels; i++, ptshift -= ptidxbits) { |
238 | target_ulong idx = (addr >> (PGSHIFT + ptshift)) & |
239 | ((1 << ptidxbits) - 1); |
240 | |
241 | /* check that physical address of PTE is legal */ |
242 | target_ulong pte_addr = base + idx * ptesize; |
243 | |
244 | if (riscv_feature(env, RISCV_FEATURE_PMP) && |
245 | !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong), |
246 | 1 << MMU_DATA_LOAD, PRV_S)) { |
247 | return TRANSLATE_PMP_FAIL; |
248 | } |
249 | #if defined(TARGET_RISCV32) |
250 | target_ulong pte = ldl_phys(cs->as, pte_addr); |
251 | #elif defined(TARGET_RISCV64) |
252 | target_ulong pte = ldq_phys(cs->as, pte_addr); |
253 | #endif |
254 | target_ulong ppn = pte >> PTE_PPN_SHIFT; |
255 | |
256 | if (!(pte & PTE_V)) { |
257 | /* Invalid PTE */ |
258 | return TRANSLATE_FAIL; |
259 | } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { |
260 | /* Inner PTE, continue walking */ |
261 | base = ppn << PGSHIFT; |
262 | } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { |
263 | /* Reserved leaf PTE flags: PTE_W */ |
264 | return TRANSLATE_FAIL; |
265 | } else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) { |
266 | /* Reserved leaf PTE flags: PTE_W + PTE_X */ |
267 | return TRANSLATE_FAIL; |
268 | } else if ((pte & PTE_U) && ((mode != PRV_U) && |
269 | (!sum || access_type == MMU_INST_FETCH))) { |
            /* User page accessed from S-mode: fault unless mstatus.SUM
               is set, and always fault on instruction fetch */
272 | return TRANSLATE_FAIL; |
273 | } else if (!(pte & PTE_U) && (mode != PRV_S)) { |
274 | /* Supervisor PTE flags when not S mode */ |
275 | return TRANSLATE_FAIL; |
276 | } else if (ppn & ((1ULL << ptshift) - 1)) { |
277 | /* Misaligned PPN */ |
278 | return TRANSLATE_FAIL; |
279 | } else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) || |
280 | ((pte & PTE_X) && mxr))) { |
281 | /* Read access check failed */ |
282 | return TRANSLATE_FAIL; |
283 | } else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) { |
284 | /* Write access check failed */ |
285 | return TRANSLATE_FAIL; |
286 | } else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) { |
287 | /* Fetch access check failed */ |
288 | return TRANSLATE_FAIL; |
289 | } else { |
290 | /* if necessary, set accessed and dirty bits. */ |
291 | target_ulong updated_pte = pte | PTE_A | |
292 | (access_type == MMU_DATA_STORE ? PTE_D : 0); |
293 | |
294 | /* Page table updates need to be atomic with MTTCG enabled */ |
295 | if (updated_pte != pte) { |
296 | /* |
297 | * - if accessed or dirty bits need updating, and the PTE is |
298 | * in RAM, then we do so atomically with a compare and swap. |
299 | * - if the PTE is in IO space or ROM, then it can't be updated |
300 | * and we return TRANSLATE_FAIL. |
301 | * - if the PTE changed by the time we went to update it, then |
302 | * it is no longer valid and we must re-walk the page table. |
303 | */ |
304 | MemoryRegion *mr; |
305 | hwaddr l = sizeof(target_ulong), addr1; |
306 | mr = address_space_translate(cs->as, pte_addr, |
307 | &addr1, &l, false, MEMTXATTRS_UNSPECIFIED); |
308 | if (memory_region_is_ram(mr)) { |
309 | target_ulong *pte_pa = |
310 | qemu_map_ram_ptr(mr->ram_block, addr1); |
311 | #if TCG_OVERSIZED_GUEST |
312 | /* MTTCG is not enabled on oversized TCG guests so |
313 | * page table updates do not need to be atomic */ |
314 | *pte_pa = pte = updated_pte; |
315 | #else |
316 | target_ulong old_pte = |
317 | atomic_cmpxchg(pte_pa, pte, updated_pte); |
318 | if (old_pte != pte) { |
319 | goto restart; |
320 | } else { |
321 | pte = updated_pte; |
322 | } |
323 | #endif |
324 | } else { |
325 | /* misconfigured PTE in ROM (AD bits are not preset) or |
326 | * PTE is in IO space and can't be updated atomically */ |
327 | return TRANSLATE_FAIL; |
328 | } |
329 | } |
330 | |
331 | /* for superpage mappings, make a fake leaf PTE for the TLB's |
332 | benefit. */ |
333 | target_ulong vpn = addr >> PGSHIFT; |
334 | *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT; |
335 | |
336 | /* set permissions on the TLB entry */ |
337 | if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { |
338 | *prot |= PAGE_READ; |
339 | } |
340 | if ((pte & PTE_X)) { |
341 | *prot |= PAGE_EXEC; |
342 | } |
343 | /* add write permission on stores or if the page is already dirty, |
344 | so that we TLB miss on later writes to update the dirty bit */ |
345 | if ((pte & PTE_W) && |
346 | (access_type == MMU_DATA_STORE || (pte & PTE_D))) { |
347 | *prot |= PAGE_WRITE; |
348 | } |
349 | return TRANSLATE_SUCCESS; |
350 | } |
351 | } |
352 | return TRANSLATE_FAIL; |
353 | } |
354 | |
355 | static void raise_mmu_exception(CPURISCVState *env, target_ulong address, |
356 | MMUAccessType access_type, bool pmp_violation) |
357 | { |
358 | CPUState *cs = env_cpu(env); |
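    /* Report a page fault only if translation was actually active (priv
     * spec >= 1.10 and satp.MODE != bare) and the failure was not a PMP
     * violation; everything else is an access fault. */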
359 | int page_fault_exceptions = |
360 | (env->priv_ver >= PRIV_VERSION_1_10_0) && |
361 | get_field(env->satp, SATP_MODE) != VM_1_10_MBARE && |
362 | !pmp_violation; |
363 | switch (access_type) { |
364 | case MMU_INST_FETCH: |
365 | cs->exception_index = page_fault_exceptions ? |
366 | RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT; |
367 | break; |
368 | case MMU_DATA_LOAD: |
369 | cs->exception_index = page_fault_exceptions ? |
370 | RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT; |
371 | break; |
372 | case MMU_DATA_STORE: |
373 | cs->exception_index = page_fault_exceptions ? |
374 | RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT; |
375 | break; |
376 | default: |
377 | g_assert_not_reached(); |
378 | } |
379 | env->badaddr = address; |
380 | } |
381 | |
382 | hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) |
383 | { |
384 | RISCVCPU *cpu = RISCV_CPU(cs); |
385 | hwaddr phys_addr; |
386 | int prot; |
387 | int mmu_idx = cpu_mmu_index(&cpu->env, false); |
388 | |
389 | if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) { |
390 | return -1; |
391 | } |
392 | return phys_addr; |
393 | } |
394 | |
395 | void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write, |
396 | bool is_exec, int unused, unsigned size) |
397 | { |
398 | RISCVCPU *cpu = RISCV_CPU(cs); |
399 | CPURISCVState *env = &cpu->env; |
400 | |
401 | if (is_write) { |
402 | cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; |
403 | } else { |
404 | cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; |
405 | } |
406 | |
407 | env->badaddr = addr; |
    riscv_raise_exception(env, cs->exception_index, GETPC());
409 | } |
410 | |
411 | void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, |
412 | MMUAccessType access_type, int mmu_idx, |
413 | uintptr_t retaddr) |
414 | { |
415 | RISCVCPU *cpu = RISCV_CPU(cs); |
416 | CPURISCVState *env = &cpu->env; |
417 | switch (access_type) { |
418 | case MMU_INST_FETCH: |
419 | cs->exception_index = RISCV_EXCP_INST_ADDR_MIS; |
420 | break; |
421 | case MMU_DATA_LOAD: |
422 | cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS; |
423 | break; |
424 | case MMU_DATA_STORE: |
425 | cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS; |
426 | break; |
427 | default: |
428 | g_assert_not_reached(); |
429 | } |
430 | env->badaddr = addr; |
431 | riscv_raise_exception(env, cs->exception_index, retaddr); |
432 | } |
433 | #endif |
434 | |
435 | bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, |
436 | MMUAccessType access_type, int mmu_idx, |
437 | bool probe, uintptr_t retaddr) |
438 | { |
439 | #ifndef CONFIG_USER_ONLY |
440 | RISCVCPU *cpu = RISCV_CPU(cs); |
441 | CPURISCVState *env = &cpu->env; |
442 | hwaddr pa = 0; |
443 | int prot; |
444 | bool pmp_violation = false; |
445 | int ret = TRANSLATE_FAIL; |
446 | int mode = mmu_idx; |
447 | |
    qemu_log_mask(CPU_LOG_MMU, "%s addr %" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);
450 | |
451 | ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx); |
452 | |
453 | if (mode == PRV_M && access_type != MMU_INST_FETCH) { |
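        /* The PMP check below must use the effective privilege: MPRV
         * redirects M-mode data accesses to the mode in mstatus.MPP. */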
454 | if (get_field(env->mstatus, MSTATUS_MPRV)) { |
455 | mode = get_field(env->mstatus, MSTATUS_MPP); |
456 | } |
457 | } |
458 | |
    qemu_log_mask(CPU_LOG_MMU,
                  "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                  " prot %d\n", __func__, address, ret, pa, prot);
462 | |
463 | if (riscv_feature(env, RISCV_FEATURE_PMP) && |
464 | (ret == TRANSLATE_SUCCESS) && |
465 | !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { |
466 | ret = TRANSLATE_PMP_FAIL; |
467 | } |
468 | if (ret == TRANSLATE_PMP_FAIL) { |
469 | pmp_violation = true; |
470 | } |
471 | if (ret == TRANSLATE_SUCCESS) { |
472 | tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK, |
473 | prot, mmu_idx, TARGET_PAGE_SIZE); |
474 | return true; |
475 | } else if (probe) { |
476 | return false; |
477 | } else { |
478 | raise_mmu_exception(env, address, access_type, pmp_violation); |
479 | riscv_raise_exception(env, cs->exception_index, retaddr); |
480 | } |
481 | #else |
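    /* User-mode emulation has no MMU or PMP: report every fault as a
     * page fault and unwind straight back to the cpu loop. */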
482 | switch (access_type) { |
483 | case MMU_INST_FETCH: |
484 | cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT; |
485 | break; |
486 | case MMU_DATA_LOAD: |
487 | cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT; |
488 | break; |
489 | case MMU_DATA_STORE: |
490 | cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT; |
        break;
    default:
        g_assert_not_reached();
    }
493 | cpu_loop_exit_restore(cs, retaddr); |
494 | #endif |
495 | } |
496 | |
497 | /* |
498 | * Handle Traps |
499 | * |
500 | * Adapted from Spike's processor_t::take_trap. |
501 | * |
502 | */ |
503 | void riscv_cpu_do_interrupt(CPUState *cs) |
504 | { |
505 | #if !defined(CONFIG_USER_ONLY) |
506 | |
507 | RISCVCPU *cpu = RISCV_CPU(cs); |
508 | CPURISCVState *env = &cpu->env; |
509 | |
    /* cs->exception_index is 32 bits wide, unlike mcause which is XLEN bits
     * wide, so we mask off the MSB and separate into trap type and cause.
     */
513 | bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); |
514 | target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; |
515 | target_ulong deleg = async ? env->mideleg : env->medeleg; |
516 | target_ulong tval = 0; |
517 | |
518 | static const int ecall_cause_map[] = { |
519 | [PRV_U] = RISCV_EXCP_U_ECALL, |
520 | [PRV_S] = RISCV_EXCP_S_ECALL, |
521 | [PRV_H] = RISCV_EXCP_H_ECALL, |
522 | [PRV_M] = RISCV_EXCP_M_ECALL |
523 | }; |
524 | |
525 | if (!async) { |
526 | /* set tval to badaddr for traps with address information */ |
527 | switch (cause) { |
528 | case RISCV_EXCP_INST_ADDR_MIS: |
529 | case RISCV_EXCP_INST_ACCESS_FAULT: |
530 | case RISCV_EXCP_LOAD_ADDR_MIS: |
531 | case RISCV_EXCP_STORE_AMO_ADDR_MIS: |
532 | case RISCV_EXCP_LOAD_ACCESS_FAULT: |
533 | case RISCV_EXCP_STORE_AMO_ACCESS_FAULT: |
534 | case RISCV_EXCP_INST_PAGE_FAULT: |
535 | case RISCV_EXCP_LOAD_PAGE_FAULT: |
536 | case RISCV_EXCP_STORE_PAGE_FAULT: |
537 | tval = env->badaddr; |
538 | break; |
539 | default: |
540 | break; |
541 | } |
542 | /* ecall is dispatched as one cause so translate based on mode */ |
543 | if (cause == RISCV_EXCP_U_ECALL) { |
            assert(env->priv <= PRV_M);
545 | cause = ecall_cause_map[env->priv]; |
546 | } |
547 | } |
548 | |
    trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
        (async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");
551 | |
552 | if (env->priv <= PRV_S && |
553 | cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { |
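        /* Taken from S- or U-mode with the cause bit set in the relevant
         * delegation register, so the trap is handled in S-mode. */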
554 | /* handle the trap in S-mode */ |
555 | target_ulong s = env->mstatus; |
556 | s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ? |
557 | get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv)); |
558 | s = set_field(s, MSTATUS_SPP, env->priv); |
559 | s = set_field(s, MSTATUS_SIE, 0); |
560 | env->mstatus = s; |
561 | env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1)); |
562 | env->sepc = env->pc; |
563 | env->sbadaddr = tval; |
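        /* stvec bits [1:0] select the mode: 0 is direct, 1 is vectored,
         * in which case asynchronous traps enter at base + 4 * cause. */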
564 | env->pc = (env->stvec >> 2 << 2) + |
565 | ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); |
566 | riscv_cpu_set_mode(env, PRV_S); |
567 | } else { |
568 | /* handle the trap in M-mode */ |
569 | target_ulong s = env->mstatus; |
570 | s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ? |
571 | get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv)); |
572 | s = set_field(s, MSTATUS_MPP, env->priv); |
573 | s = set_field(s, MSTATUS_MIE, 0); |
574 | env->mstatus = s; |
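        /* Interrupts set the MSB of mcause: when async is 1 the shift
         * drops the top bit of the all-ones value, so the complement is
         * exactly the MSB; when async is 0 the complement is zero. */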
575 | env->mcause = cause | ~(((target_ulong)-1) >> async); |
576 | env->mepc = env->pc; |
577 | env->mbadaddr = tval; |
578 | env->pc = (env->mtvec >> 2 << 2) + |
579 | ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); |
580 | riscv_cpu_set_mode(env, PRV_M); |
581 | } |
582 | |
583 | /* NOTE: it is not necessary to yield load reservations here. It is only |
584 | * necessary for an SC from "another hart" to cause a load reservation |
585 | * to be yielded. Refer to the memory consistency model section of the |
586 | * RISC-V ISA Specification. |
587 | */ |
588 | |
589 | #endif |
590 | cs->exception_index = EXCP_NONE; /* mark handled to qemu */ |
591 | } |
592 | |