/*
 * Sparc MMU helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/qemu-print.h"
#include "trace.h"

/* Sparc MMU emulation */

#if defined(CONFIG_USER_ONLY)

bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;

    if (access_type == MMU_INST_FETCH) {
        cs->exception_index = TT_TFAULT;
    } else {
        cs->exception_index = TT_DFAULT;
#ifdef TARGET_SPARC64
        env->dmmu.mmuregs[4] = address;
#else
        env->mmuregs[4] = address;
#endif
    }
    cpu_loop_exit_restore(cs, retaddr);
}

#else

#ifndef TARGET_SPARC64
/*
 * Sparc V8 Reference MMU (SRMMU)
 */
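/*
 * access_table[access_index][ACC]: SRMMU fault type (FT) codes, already
 * shifted into their FSR bit position.  0 means the access is allowed,
 * 8 is FT = 2 (protection error) and 12 is FT = 3 (privilege violation).
 * For example, a user data write (index 4) to a page with ACC = 6
 * (no user access) yields 12, a privilege violation.
 */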
static const int access_table[8][8] = {
    { 0, 0, 0, 0, 8, 0, 12, 12 },
    { 0, 0, 0, 0, 8, 0, 0, 0 },
    { 8, 8, 0, 0, 0, 8, 12, 12 },
    { 8, 8, 0, 0, 0, 8, 0, 0 },
    { 8, 0, 8, 0, 8, 8, 12, 12 },
    { 8, 0, 8, 0, 8, 0, 8, 0 },
    { 8, 8, 8, 0, 8, 8, 12, 12 },
    { 8, 8, 8, 0, 8, 8, 8, 0 }
};
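
/*
 * perm_table[is_user][ACC]: QEMU page protection bits granted by each
 * SRMMU ACC value; row 0 is supervisor accesses, row 1 is user accesses.
 */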
static const int perm_table[2][8] = {
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC
    },
    {
        PAGE_READ,
        PAGE_READ | PAGE_WRITE,
        PAGE_READ | PAGE_EXEC,
        PAGE_READ | PAGE_WRITE | PAGE_EXEC,
        PAGE_EXEC,
        PAGE_READ,
        0,
        0,
    }
};

static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index, MemTxAttrs *attrs,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    int access_perms = 0;
    hwaddr pde_ptr;
    uint32_t pde;
    int error_code = 0, is_dirty, is_user;
    unsigned long page_offset;
    CPUState *cs = env_cpu(env);

    is_user = mmu_idx == MMU_USER_IDX;

    if (mmu_idx == MMU_PHYS_IDX) {
        *page_size = TARGET_PAGE_SIZE;
        /* Boot mode: instruction fetches are taken from PROM */
        if (rw == 2 && (env->mmuregs[0] & env->def.mmu_bm)) {
            *physical = env->prom_addr | (address & 0x7ffffULL);
            *prot = PAGE_READ | PAGE_EXEC;
            return 0;
        }
        *physical = address;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

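    /*
     * access_index encodes the access for the access_table lookup:
     * bit 2 = write, bit 1 = instruction fetch, bit 0 = supervisor.
     */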
    *access_index = ((rw & 1) << 2) | (rw & 2) | (is_user ? 0 : 1);
    *physical = 0xffffffffffff0000ULL;

    /* SPARC reference MMU table walk: Context table->L1->L2->PTE */
    /* Context base + context number */
    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    /* Ctx pde */
    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
        return 1 << 2;
    case 2: /* L0 PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 4 << 2;
    case 1: /* L0 PDE */
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
            return (1 << 8) | (1 << 2);
        case 3: /* Reserved */
            return (1 << 8) | (4 << 2);
        case 1: /* L1 PDE */
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
                return (2 << 8) | (1 << 2);
            case 3: /* Reserved */
                return (2 << 8) | (4 << 2);
            case 1: /* L2 PDE */
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                    return (3 << 8) | (1 << 2);
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return (3 << 8) | (4 << 2);
                case 2: /* L3 PTE */
                    page_offset = 0;
                }
                *page_size = TARGET_PAGE_SIZE;
                break;
            case 2: /* L2 PTE */
                page_offset = address & 0x3f000;
                *page_size = 0x40000;
            }
            break;
        case 2: /* L1 PTE */
            page_offset = address & 0xfff000;
            *page_size = 0x1000000;
        }
    }

    /* check access */
    access_perms = (pde & PTE_ACCESS_MASK) >> PTE_ACCESS_SHIFT;
    error_code = access_table[*access_index][access_perms];
    if (error_code && !((env->mmuregs[0] & MMU_NF) && is_user)) {
        return error_code;
    }

    /* update page modified and dirty bits */
    is_dirty = (rw & 1) && !(pde & PG_MODIFIED_MASK);
    if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
        pde |= PG_ACCESSED_MASK;
        if (is_dirty) {
            pde |= PG_MODIFIED_MASK;
        }
        stl_phys_notdirty(cs->as, pde_ptr, pde);
    }

    /* the page can be put in the TLB */
    *prot = perm_table[is_user][access_perms];
    if (!(pde & PG_MODIFIED_MASK)) {
        /* only set write access if already dirty... otherwise wait
           for dirty access */
        *prot &= ~PAGE_WRITE;
    }

    /* Even if large ptes, we map only one 4KB page in the cache to
       avoid filling it too fast */
    *physical = ((hwaddr)(pde & PTE_ADDR_MASK) << 4) + page_offset;
    return error_code;
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr paddr;
    target_ulong vaddr;
    target_ulong page_size;
    int error_code = 0, prot, access_index;
    MemTxAttrs attrs = {};

    /*
     * TODO: If we ever need tlb_vaddr_to_host for this target,
     * then we must figure out how to manipulate FSR and FAR
     * when both MMU_NF and probe are set. In the meantime,
     * do not support this use case.
     */
    assert(!probe);

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
                                      address, access_type,
                                      mmu_idx, &page_size);
    vaddr = address;
    if (likely(error_code == 0)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "Translate at %" VADDR_PRIx " -> "
                      TARGET_FMT_plx ", vaddr " TARGET_FMT_lx "\n",
                      address, paddr, vaddr);
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, page_size);
        return true;
    }

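    /*
     * Fold the failure into the SRMMU Fault Status Register: AT (bits 7:5)
     * from access_index, FT (bits 4:2) and L (bits 9:8) from error_code,
     * and bit 1 for FAV (fault address valid).
     */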
    if (env->mmuregs[3]) { /* Fault status register */
        env->mmuregs[3] = 1; /* overflow (not read before another fault) */
    }
    env->mmuregs[3] |= (access_index << 5) | error_code | 2;
    env->mmuregs[4] = address; /* Fault address register */

    if ((env->mmuregs[0] & MMU_NF) || env->psret == 0) {
        /* No fault mode: if a mapping is available, just override
           permissions. If no mapping is available, redirect accesses to
           neverland. Fake/overridden mappings will be flushed when
           switching to normal mode. */
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        tlb_set_page(cs, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    } else {
        if (access_type == MMU_INST_FETCH) {
            cs->exception_index = TT_TFAULT;
        } else {
            cs->exception_index = TT_DFAULT;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }
}

target_ulong mmu_probe(CPUSPARCState *env, target_ulong address, int mmulev)
{
    CPUState *cs = env_cpu(env);
    hwaddr pde_ptr;
    uint32_t pde;

    /* Context base + context number */
    pde_ptr = (hwaddr)(env->mmuregs[1] << 4) +
        (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);

    switch (pde & PTE_ENTRYTYPE_MASK) {
    default:
    case 0: /* Invalid */
    case 2: /* PTE, maybe should not happen? */
    case 3: /* Reserved */
        return 0;
    case 1: /* L1 PDE */
        if (mmulev == 3) {
            return pde;
        }
        pde_ptr = ((address >> 22) & ~3) + ((pde & ~3) << 4);
        pde = ldl_phys(cs->as, pde_ptr);

        switch (pde & PTE_ENTRYTYPE_MASK) {
        default:
        case 0: /* Invalid */
        case 3: /* Reserved */
            return 0;
        case 2: /* L1 PTE */
            return pde;
        case 1: /* L2 PDE */
            if (mmulev == 2) {
                return pde;
            }
            pde_ptr = ((address & 0xfc0000) >> 16) + ((pde & ~3) << 4);
            pde = ldl_phys(cs->as, pde_ptr);

            switch (pde & PTE_ENTRYTYPE_MASK) {
            default:
            case 0: /* Invalid */
            case 3: /* Reserved */
                return 0;
            case 2: /* L2 PTE */
                return pde;
            case 1: /* L3 PDE */
                if (mmulev == 1) {
                    return pde;
                }
                pde_ptr = ((address & 0x3f000) >> 10) + ((pde & ~3) << 4);
                pde = ldl_phys(cs->as, pde_ptr);

                switch (pde & PTE_ENTRYTYPE_MASK) {
                default:
                case 0: /* Invalid */
                case 1: /* PDE, should not happen */
                case 3: /* Reserved */
                    return 0;
                case 2: /* L3 PTE */
                    return pde;
                }
            }
        }
    }
    return 0;
}

void dump_mmu(CPUSPARCState *env)
{
    CPUState *cs = env_cpu(env);
    target_ulong va, va1, va2;
    unsigned int n, m, o;
    hwaddr pde_ptr, pa;
    uint32_t pde;

    pde_ptr = (env->mmuregs[1] << 4) + (env->mmuregs[2] << 2);
    pde = ldl_phys(cs->as, pde_ptr);
    qemu_printf("Root ptr: " TARGET_FMT_plx ", ctx: %d\n",
                (hwaddr)env->mmuregs[1] << 4, env->mmuregs[2]);
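    /* Walk the 4G address space: 16M L1 regions, 256K L2 regions, 4K pages. */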
    for (n = 0, va = 0; n < 256; n++, va += 16 * 1024 * 1024) {
        pde = mmu_probe(env, va, 2);
        if (pde) {
            pa = cpu_get_phys_page_debug(cs, va);
            qemu_printf("VA: " TARGET_FMT_lx ", PA: " TARGET_FMT_plx
                        " PDE: " TARGET_FMT_lx "\n", va, pa, pde);
            for (m = 0, va1 = va; m < 64; m++, va1 += 256 * 1024) {
                pde = mmu_probe(env, va1, 1);
                if (pde) {
                    pa = cpu_get_phys_page_debug(cs, va1);
                    qemu_printf(" VA: " TARGET_FMT_lx ", PA: "
                                TARGET_FMT_plx " PDE: " TARGET_FMT_lx "\n",
                                va1, pa, pde);
                    for (o = 0, va2 = va1; o < 64; o++, va2 += 4 * 1024) {
                        pde = mmu_probe(env, va2, 0);
                        if (pde) {
                            pa = cpu_get_phys_page_debug(cs, va2);
                            qemu_printf("  VA: " TARGET_FMT_lx ", PA: "
                                        TARGET_FMT_plx " PTE: "
                                        TARGET_FMT_lx "\n",
                                        va2, pa, pde);
                        }
                    }
                }
            }
        }
    }
}

/* GDB expects all register windows to be flushed to RAM. This function
 * handles reads (and only reads) in stack frames as if the windows were
 * flushed. We assume that the SPARC ABI is followed.
 */
int sparc_cpu_memory_rw_debug(CPUState *cs, vaddr address,
                              uint8_t *buf, int len, bool is_write)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong addr = address;
    int i;
    int len1;
    int cwp = env->cwp;

    if (!is_write) {
        for (i = 0; i < env->nwindows; i++) {
            int off;
            target_ulong fp = env->regbase[cwp * 16 + 22];

            /* Assume fp == 0 means end of frame. */
            if (fp == 0) {
                break;
            }

            cwp = cpu_cwp_inc(env, cwp + 1);

            /* Invalid window? */
            if (env->wim & (1 << cwp)) {
                break;
            }

            /* According to the ABI, the stack is growing downward. */
            if (addr + len < fp) {
                break;
            }

            /* Not in this frame. */
            if (addr > fp + 64) {
                continue;
            }

            /* Handle access before this window. */
            if (addr < fp) {
                len1 = fp - addr;
                if (cpu_memory_rw_debug(cs, addr, buf, len1, is_write) != 0) {
                    return -1;
                }
                addr += len1;
                len -= len1;
                buf += len1;
            }

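            /*
             * %fp of this window is the caller's %sp, so [fp, fp + 64)
             * is where the caller's locals and ins would be flushed.
             */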
            /* Access registers byte by byte. Not very efficient, but
             * speed is not critical.
             */
            off = addr - fp;
            len1 = 64 - off;

            if (len1 > len) {
                len1 = len;
            }

            for (; len1; len1--) {
                int reg = cwp * 16 + 8 + (off >> 2);
                union {
                    uint32_t v;
                    uint8_t c[4];
                } u;
                u.v = cpu_to_be32(env->regbase[reg]);
                *buf++ = u.c[off & 3];
                addr++;
                len--;
                off++;
            }

            if (len == 0) {
                return 0;
            }
        }
    }
    return cpu_memory_rw_debug(cs, addr, buf, len, is_write);
}

#else /* !TARGET_SPARC64 */

/* 41-bit physical address space */
static inline hwaddr ultrasparc_truncate_physical(uint64_t x)
{
    return x & 0x1ffffffffffULL;
}

/*
 * UltraSparc IIi I/DMMUs
 */

/* Returns true if the TTE tag is valid and matches the virtual address
 * in the given context. Requires the virtual address mask computed
 * from the TTE entry size.
 */
static inline int ultrasparc_tag_match(SparcTLBEntry *tlb,
                                       uint64_t address, uint64_t context,
                                       hwaddr *physical)
{
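    /* TTE_PGSIZE is 0..3, selecting a page size of 8K, 64K, 512K or 4M. */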
    uint64_t mask = -(8192ULL << 3 * TTE_PGSIZE(tlb->tte));

    /* valid, context match, virtual address match? */
    if (TTE_IS_VALID(tlb->tte) &&
        (TTE_IS_GLOBAL(tlb->tte) || tlb_compare_context(tlb, context))
        && compare_masked(address, tlb->tag, mask)) {
        /* decode physical address */
        *physical = ((tlb->tte & mask) | (address & ~mask)) & 0x1ffffffe000ULL;
        return 1;
    }

    return 0;
}

static int get_physical_address_data(CPUSPARCState *env, hwaddr *physical,
                                     int *prot, MemTxAttrs *attrs,
                                     target_ulong address, int rw, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    uint64_t sfsr = 0;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        sfsr |= SFSR_CT_PRIMARY;
        break;
    case MMU_USER_SECONDARY_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_SECONDARY_IDX:
        context = env->dmmu.mmu_secondary_context & 0x1fff;
        sfsr |= SFSR_CT_SECONDARY;
        break;
    case MMU_NUCLEUS_IDX:
        sfsr |= SFSR_CT_NUCLEUS;
        /* FALLTHRU */
    default:
        context = 0;
        break;
    }

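    /* rw: 0 = data load, 1 = data store, 4 = no-fault ASI load. */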
    if (rw == 1) {
        sfsr |= SFSR_WRITE_BIT;
    } else if (rw == 4) {
        sfsr |= SFSR_NF_BIT;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->dtlb[i], address, context, physical)) {
            int do_fault = 0;

            if (TTE_IS_IE(env->dtlb[i].tte)) {
                attrs->byte_swap = true;
            }

            /* access ok? */
            /* multiple bits in SFSR.FT may be set on TT_DFAULT */
            if (TTE_IS_PRIV(env->dtlb[i].tte) && is_user) {
                do_fault = 1;
                sfsr |= SFSR_FT_PRIV_BIT; /* privilege violation */
                trace_mmu_helper_dfault(address, context, mmu_idx, env->tl);
            }
            if (rw == 4) {
                if (TTE_IS_SIDEEFFECT(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NF_E_BIT;
                }
            } else {
                if (TTE_IS_NFO(env->dtlb[i].tte)) {
                    do_fault = 1;
                    sfsr |= SFSR_FT_NFO_BIT;
                }
            }

            if (do_fault) {
                /* faults above are reported with TT_DFAULT. */
                cs->exception_index = TT_DFAULT;
            } else if (!TTE_IS_W_OK(env->dtlb[i].tte) && (rw == 1)) {
                do_fault = 1;
                cs->exception_index = TT_DPROT;

                trace_mmu_helper_dprot(address, context, mmu_idx, env->tl);
            }

            if (!do_fault) {
                *prot = PAGE_READ;
                if (TTE_IS_W_OK(env->dtlb[i].tte)) {
                    *prot |= PAGE_WRITE;
                }

                TTE_SET_USED(env->dtlb[i].tte);

                return 0;
            }

            if (env->dmmu.sfsr & SFSR_VALID_BIT) { /* Fault status register */
                sfsr |= SFSR_OW_BIT; /* overflow (not read before
                                        another fault) */
            }

            if (env->pstate & PS_PRIV) {
                sfsr |= SFSR_PR_BIT;
            }

            /* FIXME: ASI field in SFSR must be set */
            env->dmmu.sfsr = sfsr | SFSR_VALID_BIT;

            env->dmmu.sfar = address; /* Fault address register */

            env->dmmu.tag_access = (address & ~0x1fffULL) | context;

            return 1;
        }
    }

    trace_mmu_helper_dmiss(address, context);

    /*
     * On MMU misses:
     * - UltraSPARC IIi: SFSR and SFAR unmodified
     * - JPS1: SFAR updated and some fields of SFSR updated
     */
    env->dmmu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_DMISS;
    return 1;
}

static int get_physical_address_code(CPUSPARCState *env, hwaddr *physical,
                                     int *prot, MemTxAttrs *attrs,
                                     target_ulong address, int mmu_idx)
{
    CPUState *cs = env_cpu(env);
    unsigned int i;
    uint64_t context;
    bool is_user = false;

    switch (mmu_idx) {
    case MMU_PHYS_IDX:
    case MMU_USER_SECONDARY_IDX:
    case MMU_KERNEL_SECONDARY_IDX:
        g_assert_not_reached();
    case MMU_USER_IDX:
        is_user = true;
        /* fallthru */
    case MMU_KERNEL_IDX:
        context = env->dmmu.mmu_primary_context & 0x1fff;
        break;
    default:
        context = 0;
        break;
    }

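    /*
     * Above trap level 0 the IMMU always uses the NUCLEUS context,
     * overriding whatever the mmu_idx switch above selected.
     */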
    if (env->tl == 0) {
        /* PRIMARY context */
        context = env->dmmu.mmu_primary_context & 0x1fff;
    } else {
        /* NUCLEUS context */
        context = 0;
    }

    for (i = 0; i < 64; i++) {
        /* ctx match, vaddr match, valid? */
        if (ultrasparc_tag_match(&env->itlb[i],
                                 address, context, physical)) {
            /* access ok? */
            if (TTE_IS_PRIV(env->itlb[i].tte) && is_user) {
                /* Fault status register */
                if (env->immu.sfsr & SFSR_VALID_BIT) {
                    env->immu.sfsr = SFSR_OW_BIT; /* overflow (not read before
                                                     another fault) */
                } else {
                    env->immu.sfsr = 0;
                }
                if (env->pstate & PS_PRIV) {
                    env->immu.sfsr |= SFSR_PR_BIT;
                }
                if (env->tl > 0) {
                    env->immu.sfsr |= SFSR_CT_NUCLEUS;
                }

                /* FIXME: ASI field in SFSR must be set */
                env->immu.sfsr |= SFSR_FT_PRIV_BIT | SFSR_VALID_BIT;
                cs->exception_index = TT_TFAULT;

                env->immu.tag_access = (address & ~0x1fffULL) | context;

                trace_mmu_helper_tfault(address, context);

                return 1;
            }
            *prot = PAGE_EXEC;
            TTE_SET_USED(env->itlb[i].tte);
            return 0;
        }
    }

    trace_mmu_helper_tmiss(address, context);

    /* Context is stored in DMMU (dmmuregs[1]) also for IMMU */
    env->immu.tag_access = (address & ~0x1fffULL) | context;
    cs->exception_index = TT_TMISS;
    return 1;
}

static int get_physical_address(CPUSPARCState *env, hwaddr *physical,
                                int *prot, int *access_index, MemTxAttrs *attrs,
                                target_ulong address, int rw, int mmu_idx,
                                target_ulong *page_size)
{
    /* ??? We treat everything as a small page, then explicitly flush
       everything when an entry is evicted. */
    *page_size = TARGET_PAGE_SIZE;

    /* safety net to catch wrong softmmu index use from dynamic code */
    if (env->tl > 0 && mmu_idx != MMU_NUCLEUS_IDX) {
        if (rw == 2) {
            trace_mmu_helper_get_phys_addr_code(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        } else {
            trace_mmu_helper_get_phys_addr_data(env->tl, mmu_idx,
                                                env->dmmu.mmu_primary_context,
                                                env->dmmu.mmu_secondary_context,
                                                address);
        }
    }

    if (mmu_idx == MMU_PHYS_IDX) {
        *physical = ultrasparc_truncate_physical(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return 0;
    }

    if (rw == 2) {
        return get_physical_address_code(env, physical, prot, attrs, address,
                                         mmu_idx);
    } else {
        return get_physical_address_data(env, physical, prot, attrs, address,
                                         rw, mmu_idx);
    }
}

/* Perform address translation */
bool sparc_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    target_ulong vaddr;
    hwaddr paddr;
    target_ulong page_size;
    MemTxAttrs attrs = {};
    int error_code = 0, prot, access_index;

    address &= TARGET_PAGE_MASK;
    error_code = get_physical_address(env, &paddr, &prot, &access_index, &attrs,
                                      address, access_type,
                                      mmu_idx, &page_size);
    if (likely(error_code == 0)) {
        vaddr = address;

        trace_mmu_helper_mmu_fault(address, paddr, mmu_idx, env->tl,
                                   env->dmmu.mmu_primary_context,
                                   env->dmmu.mmu_secondary_context);

        tlb_set_page_with_attrs(cs, vaddr, paddr, attrs, prot, mmu_idx,
                                page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    cpu_loop_exit_restore(cs, retaddr);
}

void dump_mmu(CPUSPARCState *env)
{
    unsigned int i;
    const char *mask;

    qemu_printf("MMU contexts: Primary: %" PRId64 ", Secondary: %"
                PRId64 "\n",
                env->dmmu.mmu_primary_context,
                env->dmmu.mmu_secondary_context);
    qemu_printf("DMMU Tag Access: %" PRIx64 ", TSB Tag Target: %" PRIx64
                "\n", env->dmmu.tag_access, env->dmmu.tsb_tag_target);
    if ((env->lsu & DMMU_E) == 0) {
        qemu_printf("DMMU disabled\n");
    } else {
        qemu_printf("DMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->dtlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->dtlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, %s, ie %s, ctx %" PRId64 " %s\n",
                            i,
                            env->dtlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->dtlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->dtlb[i].tte) ? "priv" : "user",
                            TTE_IS_W_OK(env->dtlb[i].tte) ? "RW" : "RO",
                            TTE_IS_LOCKED(env->dtlb[i].tte) ?
                            "locked" : "unlocked",
                            TTE_IS_IE(env->dtlb[i].tte) ?
                            "yes" : "no",
                            env->dtlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->dtlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
    if ((env->lsu & IMMU_E) == 0) {
        qemu_printf("IMMU disabled\n");
    } else {
        qemu_printf("IMMU dump\n");
        for (i = 0; i < 64; i++) {
            switch (TTE_PGSIZE(env->itlb[i].tte)) {
            default:
            case 0x0:
                mask = "  8k";
                break;
            case 0x1:
                mask = " 64k";
                break;
            case 0x2:
                mask = "512k";
                break;
            case 0x3:
                mask = "  4M";
                break;
            }
            if (TTE_IS_VALID(env->itlb[i].tte)) {
                qemu_printf("[%02u] VA: %" PRIx64 ", PA: %llx"
                            ", %s, %s, %s, ctx %" PRId64 " %s\n",
                            i,
                            env->itlb[i].tag & (uint64_t)~0x1fffULL,
                            TTE_PA(env->itlb[i].tte),
                            mask,
                            TTE_IS_PRIV(env->itlb[i].tte) ? "priv" : "user",
                            TTE_IS_LOCKED(env->itlb[i].tte) ?
                            "locked" : "unlocked",
                            env->itlb[i].tag & (uint64_t)0x1fffULL,
                            TTE_IS_GLOBAL(env->itlb[i].tte) ?
                            "global" : "local");
            }
        }
    }
}

#endif /* TARGET_SPARC64 */

static int cpu_sparc_get_phys_page(CPUSPARCState *env, hwaddr *phys,
                                   target_ulong addr, int rw, int mmu_idx)
{
    target_ulong page_size;
    int prot, access_index;
    MemTxAttrs attrs = {};

    return get_physical_address(env, phys, &prot, &access_index, &attrs, addr,
                                rw, mmu_idx, &page_size);
}

#if defined(TARGET_SPARC64)
hwaddr cpu_get_phys_page_nofault(CPUSPARCState *env, target_ulong addr,
                                 int mmu_idx)
{
    hwaddr phys_addr;

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 4, mmu_idx) != 0) {
        return -1;
    }
    return phys_addr;
}
#endif

hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    SPARCCPU *cpu = SPARC_CPU(cs);
    CPUSPARCState *env = &cpu->env;
    hwaddr phys_addr;
    int mmu_idx = cpu_mmu_index(env, false);

    if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
        if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 0, mmu_idx) != 0) {
            return -1;
        }
    }
    return phys_addr;
}
#endif /* !CONFIG_USER_ONLY */