1/*
2 * Helpers for loads and stores
3 *
4 * Copyright (c) 2003-2005 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "tcg.h"
23#include "exec/helper-proto.h"
24#include "exec/exec-all.h"
25#include "exec/cpu_ldst.h"
26#include "asi.h"
27
28//#define DEBUG_MMU
29//#define DEBUG_MXCC
30//#define DEBUG_UNALIGNED
31//#define DEBUG_UNASSIGNED
32//#define DEBUG_ASI
33//#define DEBUG_CACHE_CONTROL
34
35#ifdef DEBUG_MMU
36#define DPRINTF_MMU(fmt, ...) \
37 do { printf("MMU: " fmt , ## __VA_ARGS__); } while (0)
38#else
39#define DPRINTF_MMU(fmt, ...) do {} while (0)
40#endif
41
42#ifdef DEBUG_MXCC
43#define DPRINTF_MXCC(fmt, ...) \
44 do { printf("MXCC: " fmt , ## __VA_ARGS__); } while (0)
45#else
46#define DPRINTF_MXCC(fmt, ...) do {} while (0)
47#endif
48
49#ifdef DEBUG_ASI
50#define DPRINTF_ASI(fmt, ...) \
51 do { printf("ASI: " fmt , ## __VA_ARGS__); } while (0)
52#endif
53
54#ifdef DEBUG_CACHE_CONTROL
55#define DPRINTF_CACHE_CONTROL(fmt, ...) \
56 do { printf("CACHE_CONTROL: " fmt , ## __VA_ARGS__); } while (0)
57#else
58#define DPRINTF_CACHE_CONTROL(fmt, ...) do {} while (0)
59#endif
60
61#ifdef TARGET_SPARC64
62#ifndef TARGET_ABI32
63#define AM_CHECK(env1) ((env1)->pstate & PS_AM)
64#else
65#define AM_CHECK(env1) (1)
66#endif
67#endif
68
69#define QT0 (env->qt0)
70#define QT1 (env->qt1)
71
72#if defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY)
73/* Calculates TSB pointer value for fault page size
74 * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers
75 * UA2005 holds the page size configuration in mmu_ctx registers */
/* Compute the TSB pointer register value for a fault at the given page
 * size index.
 * UltraSPARC IIi has fixed sizes (8k or 64k) for the page pointers;
 * UA2005 (sun4v, hypervisor present) holds the page size configuration
 * in the per-context mmu_ctx registers instead.
 *
 * @mmu: the I- or D-MMU state whose tag_access holds the faulting VA/context
 * @idx: 0 for the 8k pointer, 1 for the 64k pointer
 */
static uint64_t ultrasparc_tsb_pointer(CPUSPARCState *env,
                                       const SparcV9MMU *mmu, const int idx)
{
    uint64_t tsb_register;
    int page_size;
    if (cpu_has_hypervisor(env)) {
        int tsb_index = 0;
        /* context number lives in the low 13 bits of tag access */
        int ctx = mmu->tag_access & 0x1fffULL;
        /* config register 0 is for context 0, register 1 for non-zero ctx */
        uint64_t ctx_register = mmu->sun4v_ctx_config[ctx ? 1 : 0];
        tsb_index = idx;
        tsb_index |= ctx ? 2 : 0;
        /* page size field: bits 2:0 for idx 0, bits 10:8 for idx 1 */
        page_size = idx ? ctx_register >> 8 : ctx_register;
        page_size &= 7;
        tsb_register = mmu->sun4v_tsb_pointers[tsb_index];
    } else {
        /* sun4u: page size is implied by the pointer index itself */
        page_size = idx;
        tsb_register = mmu->tsb;
    }
    int tsb_split = (tsb_register & 0x1000ULL) ? 1 : 0;
    int tsb_size = tsb_register & 0xf;

    uint64_t tsb_base_mask = (~0x1fffULL) << tsb_size;

    /* move va bits to correct position,
     * the context bits will be masked out later */
    uint64_t va = mmu->tag_access >> (3 * page_size + 9);

    /* calculate tsb_base mask and adjust va if split is in use */
    if (tsb_split) {
        if (idx == 0) {
            va &= ~(1ULL << (13 + tsb_size));
        } else {
            va |= (1ULL << (13 + tsb_size));
        }
        tsb_base_mask <<= 1;
    }

    /* low 4 bits of a TSB pointer are always zero */
    return ((tsb_register & tsb_base_mask) | (va & ~tsb_base_mask)) & ~0xfULL;
}
115
116/* Calculates tag target register value by reordering bits
117 in tag access register */
/* Build the tag target register value from the tag access register:
 * the 13-bit context moves up to bits 60:48 and the VA field is
 * shifted down by 22 bits. */
static uint64_t ultrasparc_tag_target(uint64_t tag_access_register)
{
    uint64_t context_field = (tag_access_register & 0x1fff) << 48;
    uint64_t va_field = tag_access_register >> 22;
    return context_field | va_field;
}
122
/* Overwrite one TLB entry with (tlb_tag, tlb_tte).  If the entry being
 * replaced currently holds a valid translation, first flush every QEMU
 * softmmu page covered by its mapping so no stale translation survives. */
static void replace_tlb_entry(SparcTLBEntry *tlb,
                              uint64_t tlb_tag, uint64_t tlb_tte,
                              CPUSPARCState *env)
{
    target_ulong mask, size, va, offset;

    /* flush page range if translation is valid */
    if (TTE_IS_VALID(tlb->tte)) {
        CPUState *cs = env_cpu(env);

        /* mapping size: 8K << (3 * pgsize-code), i.e. 8K/64K/512K/4M */
        size = 8192ULL << 3 * TTE_PGSIZE(tlb->tte);
        /* 1 + ~size == -size: alignment mask for the mapping base */
        mask = 1ULL + ~size;

        va = tlb->tag & mask;

        for (offset = 0; offset < size; offset += TARGET_PAGE_SIZE) {
            tlb_flush_page(cs, va + offset);
        }
    }

    tlb->tag = tlb_tag;
    tlb->tte = tlb_tte;
}
146
/* Perform a SPARC V9 demap operation on a 64-entry TLB.
 *
 * demap_addr encodes the operation: bit 6 selects demap-context vs
 * demap-page, bits 5:4 select which context register to match against
 * (primary / secondary / nucleus).  Matching entries are invalidated
 * via replace_tlb_entry(), which also flushes the softmmu.
 */
static void demap_tlb(SparcTLBEntry *tlb, target_ulong demap_addr,
                      const char *strmmu, CPUSPARCState *env1)
{
    unsigned int i;
    target_ulong mask;
    uint64_t context;

    int is_demap_context = (demap_addr >> 6) & 1;

    /* demap context */
    switch ((demap_addr >> 4) & 3) {
    case 0: /* primary */
        context = env1->dmmu.mmu_primary_context;
        break;
    case 1: /* secondary */
        context = env1->dmmu.mmu_secondary_context;
        break;
    case 2: /* nucleus */
        context = 0;
        break;
    case 3: /* reserved */
    default:
        return;
    }

    for (i = 0; i < 64; i++) {
        if (TTE_IS_VALID(tlb[i].tte)) {

            if (is_demap_context) {
                /* will remove non-global entries matching context value */
                if (TTE_IS_GLOBAL(tlb[i].tte) ||
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            } else {
                /* demap page
                   will remove any entry matching VA */
                /* widen the VA mask to the entry's page size */
                mask = 0xffffffffffffe000ULL;
                mask <<= 3 * ((tlb[i].tte >> 61) & 3);

                if (!compare_masked(demap_addr, tlb[i].tag, mask)) {
                    continue;
                }

                /* entry should be global or matching context value */
                if (!TTE_IS_GLOBAL(tlb[i].tte) &&
                    !tlb_compare_context(&tlb[i], context)) {
                    continue;
                }
            }

            replace_tlb_entry(&tlb[i], 0, 0, env1);
#ifdef DEBUG_MMU
            DPRINTF_MMU("%s demap invalidated entry [%02u]\n", strmmu, i);
            dump_mmu(env1);
#endif
        }
    }
}
206
/* Convert a sun4v (UA2005) format TTE into the sun4u layout used
 * internally by the TLB code.  If there is no hypervisor, or the tag
 * does not carry the sun4v marker bit, the TTE is already in sun4u
 * format and is returned unchanged. */
static uint64_t sun4v_tte_to_sun4u(CPUSPARCState *env, uint64_t tag,
                                   uint64_t sun4v_tte)
{
    uint64_t sun4u_tte;
    if (!(cpu_has_hypervisor(env) && (tag & TLB_UST1_IS_SUN4V_BIT))) {
        /* is already in the sun4u format */
        return sun4v_tte;
    }
    /* PA and valid bit keep their positions; everything else moves */
    sun4u_tte = TTE_PA(sun4v_tte) | (sun4v_tte & TTE_VALID_BIT);
    sun4u_tte |= (sun4v_tte & 3ULL) << 61; /* TTE_PGSIZE */
    sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_NFO_BIT_UA2005, TTE_NFO_BIT);
    sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_USED_BIT_UA2005, TTE_USED_BIT);
    sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_W_OK_BIT_UA2005, TTE_W_OK_BIT);
    sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_SIDEEFFECT_BIT_UA2005,
                             TTE_SIDEEFFECT_BIT);
    sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_PRIV_BIT_UA2005, TTE_PRIV_BIT);
    sun4u_tte |= CONVERT_BIT(sun4v_tte, TTE_LOCKED_BIT_UA2005, TTE_LOCKED_BIT);
    return sun4u_tte;
}
226
227static void replace_tlb_1bit_lru(SparcTLBEntry *tlb,
228 uint64_t tlb_tag, uint64_t tlb_tte,
229 const char *strmmu, CPUSPARCState *env1,
230 uint64_t addr)
231{
232 unsigned int i, replace_used;
233
234 tlb_tte = sun4v_tte_to_sun4u(env1, addr, tlb_tte);
235 if (cpu_has_hypervisor(env1)) {
236 uint64_t new_vaddr = tlb_tag & ~0x1fffULL;
237 uint64_t new_size = 8192ULL << 3 * TTE_PGSIZE(tlb_tte);
238 uint32_t new_ctx = tlb_tag & 0x1fffU;
239 for (i = 0; i < 64; i++) {
240 uint32_t ctx = tlb[i].tag & 0x1fffU;
241 /* check if new mapping overlaps an existing one */
242 if (new_ctx == ctx) {
243 uint64_t vaddr = tlb[i].tag & ~0x1fffULL;
244 uint64_t size = 8192ULL << 3 * TTE_PGSIZE(tlb[i].tte);
245 if (new_vaddr == vaddr
246 || (new_vaddr < vaddr + size
247 && vaddr < new_vaddr + new_size)) {
248 DPRINTF_MMU("auto demap entry [%d] %lx->%lx\n", i, vaddr,
249 new_vaddr);
250 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
251 return;
252 }
253 }
254
255 }
256 }
257 /* Try replacing invalid entry */
258 for (i = 0; i < 64; i++) {
259 if (!TTE_IS_VALID(tlb[i].tte)) {
260 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
261#ifdef DEBUG_MMU
262 DPRINTF_MMU("%s lru replaced invalid entry [%i]\n", strmmu, i);
263 dump_mmu(env1);
264#endif
265 return;
266 }
267 }
268
269 /* All entries are valid, try replacing unlocked entry */
270
271 for (replace_used = 0; replace_used < 2; ++replace_used) {
272
273 /* Used entries are not replaced on first pass */
274
275 for (i = 0; i < 64; i++) {
276 if (!TTE_IS_LOCKED(tlb[i].tte) && !TTE_IS_USED(tlb[i].tte)) {
277
278 replace_tlb_entry(&tlb[i], tlb_tag, tlb_tte, env1);
279#ifdef DEBUG_MMU
280 DPRINTF_MMU("%s lru replaced unlocked %s entry [%i]\n",
281 strmmu, (replace_used ? "used" : "unused"), i);
282 dump_mmu(env1);
283#endif
284 return;
285 }
286 }
287
288 /* Now reset used bit and search for unused entries again */
289
290 for (i = 0; i < 64; i++) {
291 TTE_SET_UNUSED(tlb[i].tte);
292 }
293 }
294
295#ifdef DEBUG_MMU
296 DPRINTF_MMU("%s lru replacement: no free entries available, "
297 "replacing the last one\n", strmmu);
298#endif
299 /* corner case: the last entry is replaced anyway */
300 replace_tlb_entry(&tlb[63], tlb_tag, tlb_tte, env1);
301}
302
303#endif
304
305#ifdef TARGET_SPARC64
306/* returns true if access using this ASI is to have address translated by MMU
307 otherwise access is to raw physical address */
308/* TODO: check sparc32 bits */
/* Return 1 if an access through this ASI goes through MMU translation,
 * 0 if it targets a raw physical address.
 * The set of translating ASIs below is the UltraSPARC IIi one; it is
 * defined by the CPU implementation. */
/* TODO: check sparc32 bits */
static inline int is_translating_asi(int asi)
{
    return (asi >= 0x04 && asi <= 0x11)
        || (asi >= 0x16 && asi <= 0x19)
        || (asi >= 0x1E && asi <= 0x1F)
        || (asi >= 0x24 && asi <= 0x2C)
        || (asi >= 0x70 && asi <= 0x73)
        || (asi >= 0x78 && asi <= 0x79)
        || (asi >= 0x80 && asi <= 0xFF);
}
328
/* Truncate the address to 32 bits when address masking applies:
 * PSTATE.AM set, or unconditionally for the 32-bit ABI build
 * (AM_CHECK is constant 1 under TARGET_ABI32). */
static inline target_ulong address_mask(CPUSPARCState *env1, target_ulong addr)
{
    if (AM_CHECK(env1)) {
        addr &= 0xffffffffULL;
    }
    return addr;
}
336
337static inline target_ulong asi_address_mask(CPUSPARCState *env,
338 int asi, target_ulong addr)
339{
340 if (is_translating_asi(asi)) {
341 addr = address_mask(env, addr);
342 }
343 return addr;
344}
345
346#ifndef CONFIG_USER_ONLY
/* Raise TT_PRIV_ACT if the current privilege level may not use this ASI.
 * @ra is the host return address used to unwind guest state. */
static inline void do_check_asi(CPUSPARCState *env, int asi, uintptr_t ra)
{
    /* ASIs >= 0x80 are user mode.
     * ASIs >= 0x30 are hyper mode (or super if hyper is not available).
     * ASIs <= 0x2f are super mode.
     */
    if (asi < 0x80
        && !cpu_hypervisor_mode(env)
        && (!cpu_supervisor_mode(env)
            || (asi >= 0x30 && cpu_has_hypervisor(env)))) {
        cpu_raise_exception_ra(env, TT_PRIV_ACT, ra);
    }
}
360#endif /* !CONFIG_USER_ONLY */
361#endif
362
363static void do_check_align(CPUSPARCState *env, target_ulong addr,
364 uint32_t align, uintptr_t ra)
365{
366 if (addr & align) {
367#ifdef DEBUG_UNALIGNED
368 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
369 "\n", addr, env->pc);
370#endif
371 cpu_raise_exception_ra(env, TT_UNALIGNED, ra);
372 }
373}
374
/* TCG helper: alignment check entry point.  GETPC() must be evaluated
 * here, in the outermost helper frame, so the faulting guest state can
 * be restored correctly. */
void helper_check_align(CPUSPARCState *env, target_ulong addr, uint32_t align)
{
    do_check_align(env, addr, align, GETPC());
}
379
380#if !defined(TARGET_SPARC64) && !defined(CONFIG_USER_ONLY) && \
381 defined(DEBUG_MXCC)
/* Dump the SuperSPARC MXCC stream data and control registers
 * (compiled only for sparc32 system emulation with DEBUG_MXCC). */
static void dump_mxcc(CPUSPARCState *env)
{
    printf("mxccdata: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccdata[0], env->mxccdata[1],
           env->mxccdata[2], env->mxccdata[3]);
    printf("mxccregs: %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n"
           "          %016" PRIx64 " %016" PRIx64 " %016" PRIx64 " %016" PRIx64
           "\n",
           env->mxccregs[0], env->mxccregs[1],
           env->mxccregs[2], env->mxccregs[3],
           env->mxccregs[4], env->mxccregs[5],
           env->mxccregs[6], env->mxccregs[7]);
}
397#endif
398
399#if (defined(TARGET_SPARC64) || !defined(CONFIG_USER_ONLY)) \
400 && defined(DEBUG_ASI)
/* Log one ASI access (DEBUG_ASI builds only); the value is masked to
 * the access size so the trace shows exactly the bytes transferred. */
static void dump_asi(const char *txt, target_ulong addr, int asi, int size,
                     uint64_t r1)
{
    switch (size) {
    case 1:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %02" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xff);
        break;
    case 2:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %04" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffff);
        break;
    case 4:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %08" PRIx64 "\n", txt,
                    addr, asi, r1 & 0xffffffff);
        break;
    case 8:
        DPRINTF_ASI("%s "TARGET_FMT_lx " asi 0x%02x = %016" PRIx64 "\n", txt,
                    addr, asi, r1);
        break;
    }
}
423#endif
424
425#ifndef TARGET_SPARC64
426#ifndef CONFIG_USER_ONLY
427
428
429/* Leon3 cache control */
430
431static void leon3_cache_control_st(CPUSPARCState *env, target_ulong addr,
432 uint64_t val, int size)
433{
434 DPRINTF_CACHE_CONTROL("st addr:%08x, val:%" PRIx64 ", size:%d\n",
435 addr, val, size);
436
437 if (size != 4) {
438 DPRINTF_CACHE_CONTROL("32bits only\n");
439 return;
440 }
441
442 switch (addr) {
443 case 0x00: /* Cache control */
444
445 /* These values must always be read as zeros */
446 val &= ~CACHE_CTRL_FD;
447 val &= ~CACHE_CTRL_FI;
448 val &= ~CACHE_CTRL_IB;
449 val &= ~CACHE_CTRL_IP;
450 val &= ~CACHE_CTRL_DP;
451
452 env->cache_control = val;
453 break;
454 case 0x04: /* Instruction cache configuration */
455 case 0x08: /* Data cache configuration */
456 /* Read Only */
457 break;
458 default:
459 DPRINTF_CACHE_CONTROL("write unknown register %08x\n", addr);
460 break;
461 };
462}
463
464static uint64_t leon3_cache_control_ld(CPUSPARCState *env, target_ulong addr,
465 int size)
466{
467 uint64_t ret = 0;
468
469 if (size != 4) {
470 DPRINTF_CACHE_CONTROL("32bits only\n");
471 return 0;
472 }
473
474 switch (addr) {
475 case 0x00: /* Cache control */
476 ret = env->cache_control;
477 break;
478
479 /* Configuration registers are read and only always keep those
480 predefined values */
481
482 case 0x04: /* Instruction cache configuration */
483 ret = 0x10220000;
484 break;
485 case 0x08: /* Data cache configuration */
486 ret = 0x18220000;
487 break;
488 default:
489 DPRINTF_CACHE_CONTROL("read unknown register %08x\n", addr);
490 break;
491 };
492 DPRINTF_CACHE_CONTROL("ld addr:%08x, ret:0x%" PRIx64 ", size:%d\n",
493 addr, ret, size);
494 return ret;
495}
496
/* Load with alternate address space (sparc32 system emulation).
 *
 * Handles the ASIs the translator does not expand inline:
 * MXCC / LEON cache-control registers, MMU probe and register reads,
 * supervisor code-space loads, physical-memory passthrough (ASI
 * 0x21-0x2f) and the SuperSPARC MMU breakpoint registers.  Unknown
 * ASIs go to cpu_unassigned_access().  The result is sign-extended
 * when MO_SIGN is set in @memop.
 */
uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
                       int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
    int sign = memop & MO_SIGN;
    CPUState *cs = env_cpu(env);
    uint64_t ret = 0;
#if defined(DEBUG_MXCC) || defined(DEBUG_ASI)
    /* remember the incoming address for the trace at function exit */
    uint32_t last_addr = addr;
#endif

    do_check_align(env, addr, size - 1, GETPC());
    switch (asi) {
    case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
        /* case ASI_LEON_CACHEREGS:  Leon3 cache control */
        switch (addr) {
        case 0x00:          /* Leon3 Cache Control */
        case 0x08:          /* Leon3 Instruction Cache config */
        case 0x0C:          /* Leon3 Date Cache config */
            if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
                ret = leon3_cache_control_ld(env, addr, size);
            }
            break;
        case 0x01c00a00: /* MXCC control register */
            if (size == 8) {
                ret = env->mxccregs[3];
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00a04: /* MXCC control register */
            /* 32-bit access returns the low half of the control reg */
            if (size == 4) {
                ret = env->mxccregs[3];
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00c00: /* Module reset register */
            if (size == 8) {
                ret = env->mxccregs[5];
                /* should we do something here? */
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        case 0x01c00f00: /* MBus port address register */
            if (size == 8) {
                ret = env->mxccregs[7];
            } else {
                qemu_log_mask(LOG_UNIMP,
                              "%08x: unimplemented access size: %d\n", addr,
                              size);
            }
            break;
        default:
            qemu_log_mask(LOG_UNIMP,
                          "%08x: unimplemented address, size: %d\n", addr,
                          size);
            break;
        }
        DPRINTF_MXCC("asi = %d, size = %d, sign = %d, "
                     "addr = %08x -> ret = %" PRIx64 ","
                     "addr = %08x\n", asi, size, sign, last_addr, ret, addr);
#ifdef DEBUG_MXCC
        dump_mxcc(env);
#endif
        break;
    case ASI_M_FLUSH_PROBE: /* SuperSparc MMU probe */
    case ASI_LEON_MMUFLUSH: /* LEON3 MMU probe */
        {
            int mmulev;

            /* probe level is encoded in address bits 11:8 */
            mmulev = (addr >> 8) & 15;
            if (mmulev > 4) {
                ret = 0;
            } else {
                ret = mmu_probe(env, addr, mmulev);
            }
            DPRINTF_MMU("mmu_probe: 0x%08x (lev %d) -> 0x%08" PRIx64 "\n",
                        addr, mmulev, ret);
        }
        break;
    case ASI_M_MMUREGS: /* SuperSparc MMU regs */
    case ASI_LEON_MMUREGS: /* LEON3 MMU regs */
        {
            int reg = (addr >> 8) & 0x1f;

            ret = env->mmuregs[reg];
            if (reg == 3) { /* Fault status cleared on read */
                env->mmuregs[3] = 0;
            } else if (reg == 0x13) { /* Fault status read */
                ret = env->mmuregs[3];
            } else if (reg == 0x14) { /* Fault address read */
                ret = env->mmuregs[4];
            }
            DPRINTF_MMU("mmu_read: reg[%d] = 0x%08" PRIx64 "\n", reg, ret);
        }
        break;
    case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
    case ASI_M_DIAGS:   /* Turbosparc DTLB Diagnostic */
    case ASI_M_IODIAG:  /* Turbosparc IOTLB Diagnostic */
        break;
    case ASI_KERNELTXT: /* Supervisor code access */
        switch (size) {
        case 1:
            ret = cpu_ldub_code(env, addr);
            break;
        case 2:
            ret = cpu_lduw_code(env, addr);
            break;
        default:
        case 4:
            ret = cpu_ldl_code(env, addr);
            break;
        case 8:
            ret = cpu_ldq_code(env, addr);
            break;
        }
        break;
    case ASI_M_TXTC_TAG:   /* SparcStation 5 I-cache tag */
    case ASI_M_TXTC_DATA:  /* SparcStation 5 I-cache data */
    case ASI_M_DATAC_TAG:  /* SparcStation 5 D-cache tag */
    case ASI_M_DATAC_DATA: /* SparcStation 5 D-cache data */
        break;
    case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
        /* low ASI nibble supplies physical address bits 35:32 */
        switch (size) {
        case 1:
            ret = ldub_phys(cs->as, (hwaddr)addr
                            | ((hwaddr)(asi & 0xf) << 32));
            break;
        case 2:
            ret = lduw_phys(cs->as, (hwaddr)addr
                            | ((hwaddr)(asi & 0xf) << 32));
            break;
        default:
        case 4:
            ret = ldl_phys(cs->as, (hwaddr)addr
                           | ((hwaddr)(asi & 0xf) << 32));
            break;
        case 8:
            ret = ldq_phys(cs->as, (hwaddr)addr
                           | ((hwaddr)(asi & 0xf) << 32));
            break;
        }
        break;
    case 0x30: /* Turbosparc secondary cache diagnostic */
    case 0x31: /* Turbosparc RAM snoop */
    case 0x32: /* Turbosparc page table descriptor diagnostic */
    case 0x39: /* data cache diagnostic register */
        ret = 0;
        break;
    case 0x38: /* SuperSPARC MMU Breakpoint Control Registers */
        {
            int reg = (addr >> 8) & 3;

            switch (reg) {
            case 0: /* Breakpoint Value (Addr) */
                ret = env->mmubpregs[reg];
                break;
            case 1: /* Breakpoint Mask */
                ret = env->mmubpregs[reg];
                break;
            case 2: /* Breakpoint Control */
                ret = env->mmubpregs[reg];
                break;
            case 3: /* Breakpoint Status */
                /* status is clear-on-read */
                ret = env->mmubpregs[reg];
                env->mmubpregs[reg] = 0ULL;
                break;
            }
            DPRINTF_MMU("read breakpoint reg[%d] 0x%016" PRIx64 "\n", reg,
                        ret);
        }
        break;
    case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
        ret = env->mmubpctrv;
        break;
    case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
        ret = env->mmubpctrc;
        break;
    case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
        ret = env->mmubpctrs;
        break;
    case 0x4c: /* SuperSPARC MMU Breakpoint Action */
        ret = env->mmubpaction;
        break;
    case ASI_USERTXT: /* User code access, XXX */
    default:
        cpu_unassigned_access(cs, addr, false, false, asi, size);
        ret = 0;
        break;

    case ASI_USERDATA: /* User data access */
    case ASI_KERNELDATA: /* Supervisor data access */
    case ASI_P: /* Implicit primary context data access (v9 only?) */
    case ASI_M_BYPASS: /* MMU passthrough */
    case ASI_LEON_BYPASS: /* LEON MMU passthrough */
        /* These are always handled inline. */
        g_assert_not_reached();
    }
    /* sign-extend the result to the requested access size */
    if (sign) {
        switch (size) {
        case 1:
            ret = (int8_t) ret;
            break;
        case 2:
            ret = (int16_t) ret;
            break;
        case 4:
            ret = (int32_t) ret;
            break;
        default:
            break;
        }
    }
#ifdef DEBUG_ASI
    dump_asi("read ", last_addr, asi, size, ret);
#endif
    return ret;
}
723
724void helper_st_asi(CPUSPARCState *env, target_ulong addr, uint64_t val,
725 int asi, uint32_t memop)
726{
727 int size = 1 << (memop & MO_SIZE);
728 CPUState *cs = env_cpu(env);
729
730 do_check_align(env, addr, size - 1, GETPC());
731 switch (asi) {
732 case ASI_M_MXCC: /* SuperSparc MXCC registers, or... */
733 /* case ASI_LEON_CACHEREGS: Leon3 cache control */
734 switch (addr) {
735 case 0x00: /* Leon3 Cache Control */
736 case 0x08: /* Leon3 Instruction Cache config */
737 case 0x0C: /* Leon3 Date Cache config */
738 if (env->def.features & CPU_FEATURE_CACHE_CTRL) {
739 leon3_cache_control_st(env, addr, val, size);
740 }
741 break;
742
743 case 0x01c00000: /* MXCC stream data register 0 */
744 if (size == 8) {
745 env->mxccdata[0] = val;
746 } else {
747 qemu_log_mask(LOG_UNIMP,
748 "%08x: unimplemented access size: %d\n", addr,
749 size);
750 }
751 break;
752 case 0x01c00008: /* MXCC stream data register 1 */
753 if (size == 8) {
754 env->mxccdata[1] = val;
755 } else {
756 qemu_log_mask(LOG_UNIMP,
757 "%08x: unimplemented access size: %d\n", addr,
758 size);
759 }
760 break;
761 case 0x01c00010: /* MXCC stream data register 2 */
762 if (size == 8) {
763 env->mxccdata[2] = val;
764 } else {
765 qemu_log_mask(LOG_UNIMP,
766 "%08x: unimplemented access size: %d\n", addr,
767 size);
768 }
769 break;
770 case 0x01c00018: /* MXCC stream data register 3 */
771 if (size == 8) {
772 env->mxccdata[3] = val;
773 } else {
774 qemu_log_mask(LOG_UNIMP,
775 "%08x: unimplemented access size: %d\n", addr,
776 size);
777 }
778 break;
779 case 0x01c00100: /* MXCC stream source */
780 if (size == 8) {
781 env->mxccregs[0] = val;
782 } else {
783 qemu_log_mask(LOG_UNIMP,
784 "%08x: unimplemented access size: %d\n", addr,
785 size);
786 }
787 env->mxccdata[0] = ldq_phys(cs->as,
788 (env->mxccregs[0] & 0xffffffffULL) +
789 0);
790 env->mxccdata[1] = ldq_phys(cs->as,
791 (env->mxccregs[0] & 0xffffffffULL) +
792 8);
793 env->mxccdata[2] = ldq_phys(cs->as,
794 (env->mxccregs[0] & 0xffffffffULL) +
795 16);
796 env->mxccdata[3] = ldq_phys(cs->as,
797 (env->mxccregs[0] & 0xffffffffULL) +
798 24);
799 break;
800 case 0x01c00200: /* MXCC stream destination */
801 if (size == 8) {
802 env->mxccregs[1] = val;
803 } else {
804 qemu_log_mask(LOG_UNIMP,
805 "%08x: unimplemented access size: %d\n", addr,
806 size);
807 }
808 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 0,
809 env->mxccdata[0]);
810 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 8,
811 env->mxccdata[1]);
812 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 16,
813 env->mxccdata[2]);
814 stq_phys(cs->as, (env->mxccregs[1] & 0xffffffffULL) + 24,
815 env->mxccdata[3]);
816 break;
817 case 0x01c00a00: /* MXCC control register */
818 if (size == 8) {
819 env->mxccregs[3] = val;
820 } else {
821 qemu_log_mask(LOG_UNIMP,
822 "%08x: unimplemented access size: %d\n", addr,
823 size);
824 }
825 break;
826 case 0x01c00a04: /* MXCC control register */
827 if (size == 4) {
828 env->mxccregs[3] = (env->mxccregs[3] & 0xffffffff00000000ULL)
829 | val;
830 } else {
831 qemu_log_mask(LOG_UNIMP,
832 "%08x: unimplemented access size: %d\n", addr,
833 size);
834 }
835 break;
836 case 0x01c00e00: /* MXCC error register */
837 /* writing a 1 bit clears the error */
838 if (size == 8) {
839 env->mxccregs[6] &= ~val;
840 } else {
841 qemu_log_mask(LOG_UNIMP,
842 "%08x: unimplemented access size: %d\n", addr,
843 size);
844 }
845 break;
846 case 0x01c00f00: /* MBus port address register */
847 if (size == 8) {
848 env->mxccregs[7] = val;
849 } else {
850 qemu_log_mask(LOG_UNIMP,
851 "%08x: unimplemented access size: %d\n", addr,
852 size);
853 }
854 break;
855 default:
856 qemu_log_mask(LOG_UNIMP,
857 "%08x: unimplemented address, size: %d\n", addr,
858 size);
859 break;
860 }
861 DPRINTF_MXCC("asi = %d, size = %d, addr = %08x, val = %" PRIx64 "\n",
862 asi, size, addr, val);
863#ifdef DEBUG_MXCC
864 dump_mxcc(env);
865#endif
866 break;
867 case ASI_M_FLUSH_PROBE: /* SuperSparc MMU flush */
868 case ASI_LEON_MMUFLUSH: /* LEON3 MMU flush */
869 {
870 int mmulev;
871
872 mmulev = (addr >> 8) & 15;
873 DPRINTF_MMU("mmu flush level %d\n", mmulev);
874 switch (mmulev) {
875 case 0: /* flush page */
876 tlb_flush_page(cs, addr & 0xfffff000);
877 break;
878 case 1: /* flush segment (256k) */
879 case 2: /* flush region (16M) */
880 case 3: /* flush context (4G) */
881 case 4: /* flush entire */
882 tlb_flush(cs);
883 break;
884 default:
885 break;
886 }
887#ifdef DEBUG_MMU
888 dump_mmu(env);
889#endif
890 }
891 break;
892 case ASI_M_MMUREGS: /* write MMU regs */
893 case ASI_LEON_MMUREGS: /* LEON3 write MMU regs */
894 {
895 int reg = (addr >> 8) & 0x1f;
896 uint32_t oldreg;
897
898 oldreg = env->mmuregs[reg];
899 switch (reg) {
900 case 0: /* Control Register */
901 env->mmuregs[reg] = (env->mmuregs[reg] & 0xff000000) |
902 (val & 0x00ffffff);
903 /* Mappings generated during no-fault mode
904 are invalid in normal mode. */
905 if ((oldreg ^ env->mmuregs[reg])
906 & (MMU_NF | env->def.mmu_bm)) {
907 tlb_flush(cs);
908 }
909 break;
910 case 1: /* Context Table Pointer Register */
911 env->mmuregs[reg] = val & env->def.mmu_ctpr_mask;
912 break;
913 case 2: /* Context Register */
914 env->mmuregs[reg] = val & env->def.mmu_cxr_mask;
915 if (oldreg != env->mmuregs[reg]) {
916 /* we flush when the MMU context changes because
917 QEMU has no MMU context support */
918 tlb_flush(cs);
919 }
920 break;
921 case 3: /* Synchronous Fault Status Register with Clear */
922 case 4: /* Synchronous Fault Address Register */
923 break;
924 case 0x10: /* TLB Replacement Control Register */
925 env->mmuregs[reg] = val & env->def.mmu_trcr_mask;
926 break;
927 case 0x13: /* Synchronous Fault Status Register with Read
928 and Clear */
929 env->mmuregs[3] = val & env->def.mmu_sfsr_mask;
930 break;
931 case 0x14: /* Synchronous Fault Address Register */
932 env->mmuregs[4] = val;
933 break;
934 default:
935 env->mmuregs[reg] = val;
936 break;
937 }
938 if (oldreg != env->mmuregs[reg]) {
939 DPRINTF_MMU("mmu change reg[%d]: 0x%08x -> 0x%08x\n",
940 reg, oldreg, env->mmuregs[reg]);
941 }
942#ifdef DEBUG_MMU
943 dump_mmu(env);
944#endif
945 }
946 break;
947 case ASI_M_TLBDIAG: /* Turbosparc ITLB Diagnostic */
948 case ASI_M_DIAGS: /* Turbosparc DTLB Diagnostic */
949 case ASI_M_IODIAG: /* Turbosparc IOTLB Diagnostic */
950 break;
951 case ASI_M_TXTC_TAG: /* I-cache tag */
952 case ASI_M_TXTC_DATA: /* I-cache data */
953 case ASI_M_DATAC_TAG: /* D-cache tag */
954 case ASI_M_DATAC_DATA: /* D-cache data */
955 case ASI_M_FLUSH_PAGE: /* I/D-cache flush page */
956 case ASI_M_FLUSH_SEG: /* I/D-cache flush segment */
957 case ASI_M_FLUSH_REGION: /* I/D-cache flush region */
958 case ASI_M_FLUSH_CTX: /* I/D-cache flush context */
959 case ASI_M_FLUSH_USER: /* I/D-cache flush user */
960 break;
961 case 0x21 ... 0x2f: /* MMU passthrough, 0x100000000 to 0xfffffffff */
962 {
963 switch (size) {
964 case 1:
965 stb_phys(cs->as, (hwaddr)addr
966 | ((hwaddr)(asi & 0xf) << 32), val);
967 break;
968 case 2:
969 stw_phys(cs->as, (hwaddr)addr
970 | ((hwaddr)(asi & 0xf) << 32), val);
971 break;
972 case 4:
973 default:
974 stl_phys(cs->as, (hwaddr)addr
975 | ((hwaddr)(asi & 0xf) << 32), val);
976 break;
977 case 8:
978 stq_phys(cs->as, (hwaddr)addr
979 | ((hwaddr)(asi & 0xf) << 32), val);
980 break;
981 }
982 }
983 break;
984 case 0x30: /* store buffer tags or Turbosparc secondary cache diagnostic */
985 case 0x31: /* store buffer data, Ross RT620 I-cache flush or
986 Turbosparc snoop RAM */
987 case 0x32: /* store buffer control or Turbosparc page table
988 descriptor diagnostic */
989 case 0x36: /* I-cache flash clear */
990 case 0x37: /* D-cache flash clear */
991 break;
992 case 0x38: /* SuperSPARC MMU Breakpoint Control Registers*/
993 {
994 int reg = (addr >> 8) & 3;
995
996 switch (reg) {
997 case 0: /* Breakpoint Value (Addr) */
998 env->mmubpregs[reg] = (val & 0xfffffffffULL);
999 break;
1000 case 1: /* Breakpoint Mask */
1001 env->mmubpregs[reg] = (val & 0xfffffffffULL);
1002 break;
1003 case 2: /* Breakpoint Control */
1004 env->mmubpregs[reg] = (val & 0x7fULL);
1005 break;
1006 case 3: /* Breakpoint Status */
1007 env->mmubpregs[reg] = (val & 0xfULL);
1008 break;
1009 }
1010 DPRINTF_MMU("write breakpoint reg[%d] 0x%016x\n", reg,
1011 env->mmuregs[reg]);
1012 }
1013 break;
1014 case 0x49: /* SuperSPARC MMU Counter Breakpoint Value */
1015 env->mmubpctrv = val & 0xffffffff;
1016 break;
1017 case 0x4a: /* SuperSPARC MMU Counter Breakpoint Control */
1018 env->mmubpctrc = val & 0x3;
1019 break;
1020 case 0x4b: /* SuperSPARC MMU Counter Breakpoint Status */
1021 env->mmubpctrs = val & 0x3;
1022 break;
1023 case 0x4c: /* SuperSPARC MMU Breakpoint Action */
1024 env->mmubpaction = val & 0x1fff;
1025 break;
1026 case ASI_USERTXT: /* User code access, XXX */
1027 case ASI_KERNELTXT: /* Supervisor code access, XXX */
1028 default:
1029 cpu_unassigned_access(cs, addr, true, false, asi, size);
1030 break;
1031
1032 case ASI_USERDATA: /* User data access */
1033 case ASI_KERNELDATA: /* Supervisor data access */
1034 case ASI_P:
1035 case ASI_M_BYPASS: /* MMU passthrough */
1036 case ASI_LEON_BYPASS: /* LEON MMU passthrough */
1037 case ASI_M_BCOPY: /* Block copy, sta access */
1038 case ASI_M_BFILL: /* Block fill, stda access */
1039 /* These are always handled inline. */
1040 g_assert_not_reached();
1041 }
1042#ifdef DEBUG_ASI
1043 dump_asi("write", addr, asi, size, val);
1044#endif
1045}
1046
1047#endif /* CONFIG_USER_ONLY */
1048#else /* TARGET_SPARC64 */
1049
1050#ifdef CONFIG_USER_ONLY
1051uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1052 int asi, uint32_t memop)
1053{
1054 int size = 1 << (memop & MO_SIZE);
1055 int sign = memop & MO_SIGN;
1056 uint64_t ret = 0;
1057
1058 if (asi < 0x80) {
1059 cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
1060 }
1061 do_check_align(env, addr, size - 1, GETPC());
1062 addr = asi_address_mask(env, asi, addr);
1063
1064 switch (asi) {
1065 case ASI_PNF: /* Primary no-fault */
1066 case ASI_PNFL: /* Primary no-fault LE */
1067 case ASI_SNF: /* Secondary no-fault */
1068 case ASI_SNFL: /* Secondary no-fault LE */
1069 if (page_check_range(addr, size, PAGE_READ) == -1) {
1070 ret = 0;
1071 break;
1072 }
1073 switch (size) {
1074 case 1:
1075 ret = cpu_ldub_data(env, addr);
1076 break;
1077 case 2:
1078 ret = cpu_lduw_data(env, addr);
1079 break;
1080 case 4:
1081 ret = cpu_ldl_data(env, addr);
1082 break;
1083 case 8:
1084 ret = cpu_ldq_data(env, addr);
1085 break;
1086 default:
1087 g_assert_not_reached();
1088 }
1089 break;
1090 break;
1091
1092 case ASI_P: /* Primary */
1093 case ASI_PL: /* Primary LE */
1094 case ASI_S: /* Secondary */
1095 case ASI_SL: /* Secondary LE */
1096 /* These are always handled inline. */
1097 g_assert_not_reached();
1098
1099 default:
1100 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1101 }
1102
1103 /* Convert from little endian */
1104 switch (asi) {
1105 case ASI_PNFL: /* Primary no-fault LE */
1106 case ASI_SNFL: /* Secondary no-fault LE */
1107 switch (size) {
1108 case 2:
1109 ret = bswap16(ret);
1110 break;
1111 case 4:
1112 ret = bswap32(ret);
1113 break;
1114 case 8:
1115 ret = bswap64(ret);
1116 break;
1117 }
1118 }
1119
1120 /* Convert to signed number */
1121 if (sign) {
1122 switch (size) {
1123 case 1:
1124 ret = (int8_t) ret;
1125 break;
1126 case 2:
1127 ret = (int16_t) ret;
1128 break;
1129 case 4:
1130 ret = (int32_t) ret;
1131 break;
1132 }
1133 }
1134#ifdef DEBUG_ASI
1135 dump_asi("read", addr, asi, size, ret);
1136#endif
1137 return ret;
1138}
1139
/* Store with alternate address space (sparc64 user-only emulation).
 * No ASI is actually storable here: primary/secondary stores are
 * expanded inline by the translator, the no-fault ASIs are read-only,
 * and everything else raises an exception after the privilege and
 * alignment checks. */
void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
                   int asi, uint32_t memop)
{
    int size = 1 << (memop & MO_SIZE);
#ifdef DEBUG_ASI
    dump_asi("write", addr, asi, size, val);
#endif
    if (asi < 0x80) {
        cpu_raise_exception_ra(env, TT_PRIV_ACT, GETPC());
    }
    do_check_align(env, addr, size - 1, GETPC());

    switch (asi) {
    case ASI_P:  /* Primary */
    case ASI_PL: /* Primary LE */
    case ASI_S:  /* Secondary */
    case ASI_SL: /* Secondary LE */
        /* These are always handled inline. */
        g_assert_not_reached();

    case ASI_PNF:  /* Primary no-fault, RO */
    case ASI_SNF:  /* Secondary no-fault, RO */
    case ASI_PNFL: /* Primary no-fault LE, RO */
    case ASI_SNFL: /* Secondary no-fault LE, RO */
    default:
        cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
    }
}
1168
1169#else /* CONFIG_USER_ONLY */
1170
1171uint64_t helper_ld_asi(CPUSPARCState *env, target_ulong addr,
1172 int asi, uint32_t memop)
1173{
1174 int size = 1 << (memop & MO_SIZE);
1175 int sign = memop & MO_SIGN;
1176 CPUState *cs = env_cpu(env);
1177 uint64_t ret = 0;
1178#if defined(DEBUG_ASI)
1179 target_ulong last_addr = addr;
1180#endif
1181
1182 asi &= 0xff;
1183
1184 do_check_asi(env, asi, GETPC());
1185 do_check_align(env, addr, size - 1, GETPC());
1186 addr = asi_address_mask(env, asi, addr);
1187
1188 switch (asi) {
1189 case ASI_PNF:
1190 case ASI_PNFL:
1191 case ASI_SNF:
1192 case ASI_SNFL:
1193 {
1194 TCGMemOpIdx oi;
1195 int idx = (env->pstate & PS_PRIV
1196 ? (asi & 1 ? MMU_KERNEL_SECONDARY_IDX : MMU_KERNEL_IDX)
1197 : (asi & 1 ? MMU_USER_SECONDARY_IDX : MMU_USER_IDX));
1198
1199 if (cpu_get_phys_page_nofault(env, addr, idx) == -1ULL) {
1200#ifdef DEBUG_ASI
1201 dump_asi("read ", last_addr, asi, size, ret);
1202#endif
1203 /* exception_index is set in get_physical_address_data. */
1204 cpu_raise_exception_ra(env, cs->exception_index, GETPC());
1205 }
1206 oi = make_memop_idx(memop, idx);
1207 switch (size) {
1208 case 1:
1209 ret = helper_ret_ldub_mmu(env, addr, oi, GETPC());
1210 break;
1211 case 2:
1212 if (asi & 8) {
1213 ret = helper_le_lduw_mmu(env, addr, oi, GETPC());
1214 } else {
1215 ret = helper_be_lduw_mmu(env, addr, oi, GETPC());
1216 }
1217 break;
1218 case 4:
1219 if (asi & 8) {
1220 ret = helper_le_ldul_mmu(env, addr, oi, GETPC());
1221 } else {
1222 ret = helper_be_ldul_mmu(env, addr, oi, GETPC());
1223 }
1224 break;
1225 case 8:
1226 if (asi & 8) {
1227 ret = helper_le_ldq_mmu(env, addr, oi, GETPC());
1228 } else {
1229 ret = helper_be_ldq_mmu(env, addr, oi, GETPC());
1230 }
1231 break;
1232 default:
1233 g_assert_not_reached();
1234 }
1235 }
1236 break;
1237
1238 case ASI_AIUP: /* As if user primary */
1239 case ASI_AIUS: /* As if user secondary */
1240 case ASI_AIUPL: /* As if user primary LE */
1241 case ASI_AIUSL: /* As if user secondary LE */
1242 case ASI_P: /* Primary */
1243 case ASI_S: /* Secondary */
1244 case ASI_PL: /* Primary LE */
1245 case ASI_SL: /* Secondary LE */
1246 case ASI_REAL: /* Bypass */
1247 case ASI_REAL_IO: /* Bypass, non-cacheable */
1248 case ASI_REAL_L: /* Bypass LE */
1249 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1250 case ASI_N: /* Nucleus */
1251 case ASI_NL: /* Nucleus Little Endian (LE) */
1252 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1253 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1254 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1255 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1256 case ASI_TWINX_REAL: /* Real address, twinx */
1257 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1258 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1259 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1260 case ASI_TWINX_N: /* Nucleus, twinx */
1261 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1262 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1263 case ASI_TWINX_P: /* Primary, twinx */
1264 case ASI_TWINX_PL: /* Primary, twinx, LE */
1265 case ASI_TWINX_S: /* Secondary, twinx */
1266 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1267 /* These are always handled inline. */
1268 g_assert_not_reached();
1269
1270 case ASI_UPA_CONFIG: /* UPA config */
1271 /* XXX */
1272 break;
1273 case ASI_LSU_CONTROL: /* LSU */
1274 ret = env->lsu;
1275 break;
1276 case ASI_IMMU: /* I-MMU regs */
1277 {
1278 int reg = (addr >> 3) & 0xf;
1279 switch (reg) {
1280 case 0:
1281 /* 0x00 I-TSB Tag Target register */
1282 ret = ultrasparc_tag_target(env->immu.tag_access);
1283 break;
1284 case 3: /* SFSR */
1285 ret = env->immu.sfsr;
1286 break;
1287 case 5: /* TSB access */
1288 ret = env->immu.tsb;
1289 break;
1290 case 6:
1291 /* 0x30 I-TSB Tag Access register */
1292 ret = env->immu.tag_access;
1293 break;
1294 default:
1295 cpu_unassigned_access(cs, addr, false, false, 1, size);
1296 ret = 0;
1297 }
1298 break;
1299 }
1300 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer */
1301 {
1302 /* env->immuregs[5] holds I-MMU TSB register value
1303 env->immuregs[6] holds I-MMU Tag Access register value */
1304 ret = ultrasparc_tsb_pointer(env, &env->immu, 0);
1305 break;
1306 }
1307 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer */
1308 {
1309 /* env->immuregs[5] holds I-MMU TSB register value
1310 env->immuregs[6] holds I-MMU Tag Access register value */
1311 ret = ultrasparc_tsb_pointer(env, &env->immu, 1);
1312 break;
1313 }
1314 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1315 {
1316 int reg = (addr >> 3) & 0x3f;
1317
1318 ret = env->itlb[reg].tte;
1319 break;
1320 }
1321 case ASI_ITLB_TAG_READ: /* I-MMU tag read */
1322 {
1323 int reg = (addr >> 3) & 0x3f;
1324
1325 ret = env->itlb[reg].tag;
1326 break;
1327 }
1328 case ASI_DMMU: /* D-MMU regs */
1329 {
1330 int reg = (addr >> 3) & 0xf;
1331 switch (reg) {
1332 case 0:
1333 /* 0x00 D-TSB Tag Target register */
1334 ret = ultrasparc_tag_target(env->dmmu.tag_access);
1335 break;
1336 case 1: /* 0x08 Primary Context */
1337 ret = env->dmmu.mmu_primary_context;
1338 break;
1339 case 2: /* 0x10 Secondary Context */
1340 ret = env->dmmu.mmu_secondary_context;
1341 break;
1342 case 3: /* SFSR */
1343 ret = env->dmmu.sfsr;
1344 break;
1345 case 4: /* 0x20 SFAR */
1346 ret = env->dmmu.sfar;
1347 break;
1348 case 5: /* 0x28 TSB access */
1349 ret = env->dmmu.tsb;
1350 break;
1351 case 6: /* 0x30 D-TSB Tag Access register */
1352 ret = env->dmmu.tag_access;
1353 break;
1354 case 7:
1355 ret = env->dmmu.virtual_watchpoint;
1356 break;
1357 case 8:
1358 ret = env->dmmu.physical_watchpoint;
1359 break;
1360 default:
1361 cpu_unassigned_access(cs, addr, false, false, 1, size);
1362 ret = 0;
1363 }
1364 break;
1365 }
1366 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer */
1367 {
1368 /* env->dmmuregs[5] holds D-MMU TSB register value
1369 env->dmmuregs[6] holds D-MMU Tag Access register value */
1370 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 0);
1371 break;
1372 }
1373 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer */
1374 {
1375 /* env->dmmuregs[5] holds D-MMU TSB register value
1376 env->dmmuregs[6] holds D-MMU Tag Access register value */
1377 ret = ultrasparc_tsb_pointer(env, &env->dmmu, 1);
1378 break;
1379 }
1380 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1381 {
1382 int reg = (addr >> 3) & 0x3f;
1383
1384 ret = env->dtlb[reg].tte;
1385 break;
1386 }
1387 case ASI_DTLB_TAG_READ: /* D-MMU tag read */
1388 {
1389 int reg = (addr >> 3) & 0x3f;
1390
1391 ret = env->dtlb[reg].tag;
1392 break;
1393 }
1394 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1395 break;
1396 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1397 ret = env->ivec_status;
1398 break;
1399 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1400 {
1401 int reg = (addr >> 4) & 0x3;
1402 if (reg < 3) {
1403 ret = env->ivec_data[reg];
1404 }
1405 break;
1406 }
1407 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1408 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1409 /* Hyperprivileged access only */
1410 cpu_unassigned_access(cs, addr, false, false, 1, size);
1411 }
1412 /* fall through */
1413 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1414 {
1415 unsigned int i = (addr >> 3) & 0x7;
1416 ret = env->scratch[i];
1417 break;
1418 }
1419 case ASI_MMU: /* UA2005 Context ID registers */
1420 switch ((addr >> 3) & 0x3) {
1421 case 1:
1422 ret = env->dmmu.mmu_primary_context;
1423 break;
1424 case 2:
1425 ret = env->dmmu.mmu_secondary_context;
1426 break;
1427 default:
1428 cpu_unassigned_access(cs, addr, true, false, 1, size);
1429 }
1430 break;
1431 case ASI_DCACHE_DATA: /* D-cache data */
1432 case ASI_DCACHE_TAG: /* D-cache tag access */
1433 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1434 case ASI_AFSR: /* E-cache asynchronous fault status */
1435 case ASI_AFAR: /* E-cache asynchronous fault address */
1436 case ASI_EC_TAG_DATA: /* E-cache tag data */
1437 case ASI_IC_INSTR: /* I-cache instruction access */
1438 case ASI_IC_TAG: /* I-cache tag access */
1439 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1440 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1441 case ASI_EC_W: /* E-cache tag */
1442 case ASI_EC_R: /* E-cache tag */
1443 break;
1444 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer */
1445 case ASI_ITLB_DATA_IN: /* I-MMU data in, WO */
1446 case ASI_IMMU_DEMAP: /* I-MMU demap, WO */
1447 case ASI_DTLB_DATA_IN: /* D-MMU data in, WO */
1448 case ASI_DMMU_DEMAP: /* D-MMU demap, WO */
1449 case ASI_INTR_W: /* Interrupt vector, WO */
1450 default:
1451 cpu_unassigned_access(cs, addr, false, false, 1, size);
1452 ret = 0;
1453 break;
1454 }
1455
1456 /* Convert to signed number */
1457 if (sign) {
1458 switch (size) {
1459 case 1:
1460 ret = (int8_t) ret;
1461 break;
1462 case 2:
1463 ret = (int16_t) ret;
1464 break;
1465 case 4:
1466 ret = (int32_t) ret;
1467 break;
1468 default:
1469 break;
1470 }
1471 }
1472#ifdef DEBUG_ASI
1473 dump_asi("read ", last_addr, asi, size, ret);
1474#endif
1475 return ret;
1476}
1477
1478void helper_st_asi(CPUSPARCState *env, target_ulong addr, target_ulong val,
1479 int asi, uint32_t memop)
1480{
1481 int size = 1 << (memop & MO_SIZE);
1482 CPUState *cs = env_cpu(env);
1483
1484#ifdef DEBUG_ASI
1485 dump_asi("write", addr, asi, size, val);
1486#endif
1487
1488 asi &= 0xff;
1489
1490 do_check_asi(env, asi, GETPC());
1491 do_check_align(env, addr, size - 1, GETPC());
1492 addr = asi_address_mask(env, asi, addr);
1493
1494 switch (asi) {
1495 case ASI_AIUP: /* As if user primary */
1496 case ASI_AIUS: /* As if user secondary */
1497 case ASI_AIUPL: /* As if user primary LE */
1498 case ASI_AIUSL: /* As if user secondary LE */
1499 case ASI_P: /* Primary */
1500 case ASI_S: /* Secondary */
1501 case ASI_PL: /* Primary LE */
1502 case ASI_SL: /* Secondary LE */
1503 case ASI_REAL: /* Bypass */
1504 case ASI_REAL_IO: /* Bypass, non-cacheable */
1505 case ASI_REAL_L: /* Bypass LE */
1506 case ASI_REAL_IO_L: /* Bypass, non-cacheable LE */
1507 case ASI_N: /* Nucleus */
1508 case ASI_NL: /* Nucleus Little Endian (LE) */
1509 case ASI_NUCLEUS_QUAD_LDD: /* Nucleus quad LDD 128 bit atomic */
1510 case ASI_NUCLEUS_QUAD_LDD_L: /* Nucleus quad LDD 128 bit atomic LE */
1511 case ASI_TWINX_AIUP: /* As if user primary, twinx */
1512 case ASI_TWINX_AIUS: /* As if user secondary, twinx */
1513 case ASI_TWINX_REAL: /* Real address, twinx */
1514 case ASI_TWINX_AIUP_L: /* As if user primary, twinx, LE */
1515 case ASI_TWINX_AIUS_L: /* As if user secondary, twinx, LE */
1516 case ASI_TWINX_REAL_L: /* Real address, twinx, LE */
1517 case ASI_TWINX_N: /* Nucleus, twinx */
1518 case ASI_TWINX_NL: /* Nucleus, twinx, LE */
1519 /* ??? From the UA2011 document; overlaps BLK_INIT_QUAD_LDD_* */
1520 case ASI_TWINX_P: /* Primary, twinx */
1521 case ASI_TWINX_PL: /* Primary, twinx, LE */
1522 case ASI_TWINX_S: /* Secondary, twinx */
1523 case ASI_TWINX_SL: /* Secondary, twinx, LE */
1524 /* These are always handled inline. */
1525 g_assert_not_reached();
1526 /* these ASIs have different functions on UltraSPARC-IIIi
1527 * and UA2005 CPUs. Use the explicit numbers to avoid confusion
1528 */
1529 case 0x31:
1530 case 0x32:
1531 case 0x39:
1532 case 0x3a:
1533 if (cpu_has_hypervisor(env)) {
1534 /* UA2005
1535 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS0
1536 * ASI_DMMU_CTX_ZERO_TSB_BASE_PS1
1537 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS0
1538 * ASI_DMMU_CTX_NONZERO_TSB_BASE_PS1
1539 */
1540 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1541 env->dmmu.sun4v_tsb_pointers[idx] = val;
1542 } else {
1543 helper_raise_exception(env, TT_ILL_INSN);
1544 }
1545 break;
1546 case 0x33:
1547 case 0x3b:
1548 if (cpu_has_hypervisor(env)) {
1549 /* UA2005
1550 * ASI_DMMU_CTX_ZERO_CONFIG
1551 * ASI_DMMU_CTX_NONZERO_CONFIG
1552 */
1553 env->dmmu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1554 } else {
1555 helper_raise_exception(env, TT_ILL_INSN);
1556 }
1557 break;
1558 case 0x35:
1559 case 0x36:
1560 case 0x3d:
1561 case 0x3e:
1562 if (cpu_has_hypervisor(env)) {
1563 /* UA2005
1564 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS0
1565 * ASI_IMMU_CTX_ZERO_TSB_BASE_PS1
1566 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS0
1567 * ASI_IMMU_CTX_NONZERO_TSB_BASE_PS1
1568 */
1569 int idx = ((asi & 2) >> 1) | ((asi & 8) >> 2);
1570 env->immu.sun4v_tsb_pointers[idx] = val;
1571 } else {
1572 helper_raise_exception(env, TT_ILL_INSN);
1573 }
1574 break;
1575 case 0x37:
1576 case 0x3f:
1577 if (cpu_has_hypervisor(env)) {
1578 /* UA2005
1579 * ASI_IMMU_CTX_ZERO_CONFIG
1580 * ASI_IMMU_CTX_NONZERO_CONFIG
1581 */
1582 env->immu.sun4v_ctx_config[(asi & 8) >> 3] = val;
1583 } else {
1584 helper_raise_exception(env, TT_ILL_INSN);
1585 }
1586 break;
1587 case ASI_UPA_CONFIG: /* UPA config */
1588 /* XXX */
1589 return;
1590 case ASI_LSU_CONTROL: /* LSU */
1591 env->lsu = val & (DMMU_E | IMMU_E);
1592 return;
1593 case ASI_IMMU: /* I-MMU regs */
1594 {
1595 int reg = (addr >> 3) & 0xf;
1596 uint64_t oldreg;
1597
1598 oldreg = env->immu.mmuregs[reg];
1599 switch (reg) {
1600 case 0: /* RO */
1601 return;
1602 case 1: /* Not in I-MMU */
1603 case 2:
1604 return;
1605 case 3: /* SFSR */
1606 if ((val & 1) == 0) {
1607 val = 0; /* Clear SFSR */
1608 }
1609 env->immu.sfsr = val;
1610 break;
1611 case 4: /* RO */
1612 return;
1613 case 5: /* TSB access */
1614 DPRINTF_MMU("immu TSB write: 0x%016" PRIx64 " -> 0x%016"
1615 PRIx64 "\n", env->immu.tsb, val);
1616 env->immu.tsb = val;
1617 break;
1618 case 6: /* Tag access */
1619 env->immu.tag_access = val;
1620 break;
1621 case 7:
1622 case 8:
1623 return;
1624 default:
1625 cpu_unassigned_access(cs, addr, true, false, 1, size);
1626 break;
1627 }
1628
1629 if (oldreg != env->immu.mmuregs[reg]) {
1630 DPRINTF_MMU("immu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1631 PRIx64 "\n", reg, oldreg, env->immuregs[reg]);
1632 }
1633#ifdef DEBUG_MMU
1634 dump_mmu(env);
1635#endif
1636 return;
1637 }
1638 case ASI_ITLB_DATA_IN: /* I-MMU data in */
1639 /* ignore real translation entries */
1640 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1641 replace_tlb_1bit_lru(env->itlb, env->immu.tag_access,
1642 val, "immu", env, addr);
1643 }
1644 return;
1645 case ASI_ITLB_DATA_ACCESS: /* I-MMU data access */
1646 {
1647 /* TODO: auto demap */
1648
1649 unsigned int i = (addr >> 3) & 0x3f;
1650
1651 /* ignore real translation entries */
1652 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1653 replace_tlb_entry(&env->itlb[i], env->immu.tag_access,
1654 sun4v_tte_to_sun4u(env, addr, val), env);
1655 }
1656#ifdef DEBUG_MMU
1657 DPRINTF_MMU("immu data access replaced entry [%i]\n", i);
1658 dump_mmu(env);
1659#endif
1660 return;
1661 }
1662 case ASI_IMMU_DEMAP: /* I-MMU demap */
1663 demap_tlb(env->itlb, addr, "immu", env);
1664 return;
1665 case ASI_DMMU: /* D-MMU regs */
1666 {
1667 int reg = (addr >> 3) & 0xf;
1668 uint64_t oldreg;
1669
1670 oldreg = env->dmmu.mmuregs[reg];
1671 switch (reg) {
1672 case 0: /* RO */
1673 case 4:
1674 return;
1675 case 3: /* SFSR */
1676 if ((val & 1) == 0) {
1677 val = 0; /* Clear SFSR, Fault address */
1678 env->dmmu.sfar = 0;
1679 }
1680 env->dmmu.sfsr = val;
1681 break;
1682 case 1: /* Primary context */
1683 env->dmmu.mmu_primary_context = val;
1684 /* can be optimized to only flush MMU_USER_IDX
1685 and MMU_KERNEL_IDX entries */
1686 tlb_flush(cs);
1687 break;
1688 case 2: /* Secondary context */
1689 env->dmmu.mmu_secondary_context = val;
1690 /* can be optimized to only flush MMU_USER_SECONDARY_IDX
1691 and MMU_KERNEL_SECONDARY_IDX entries */
1692 tlb_flush(cs);
1693 break;
1694 case 5: /* TSB access */
1695 DPRINTF_MMU("dmmu TSB write: 0x%016" PRIx64 " -> 0x%016"
1696 PRIx64 "\n", env->dmmu.tsb, val);
1697 env->dmmu.tsb = val;
1698 break;
1699 case 6: /* Tag access */
1700 env->dmmu.tag_access = val;
1701 break;
1702 case 7: /* Virtual Watchpoint */
1703 env->dmmu.virtual_watchpoint = val;
1704 break;
1705 case 8: /* Physical Watchpoint */
1706 env->dmmu.physical_watchpoint = val;
1707 break;
1708 default:
1709 cpu_unassigned_access(cs, addr, true, false, 1, size);
1710 break;
1711 }
1712
1713 if (oldreg != env->dmmu.mmuregs[reg]) {
1714 DPRINTF_MMU("dmmu change reg[%d]: 0x%016" PRIx64 " -> 0x%016"
1715 PRIx64 "\n", reg, oldreg, env->dmmuregs[reg]);
1716 }
1717#ifdef DEBUG_MMU
1718 dump_mmu(env);
1719#endif
1720 return;
1721 }
1722 case ASI_DTLB_DATA_IN: /* D-MMU data in */
1723 /* ignore real translation entries */
1724 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1725 replace_tlb_1bit_lru(env->dtlb, env->dmmu.tag_access,
1726 val, "dmmu", env, addr);
1727 }
1728 return;
1729 case ASI_DTLB_DATA_ACCESS: /* D-MMU data access */
1730 {
1731 unsigned int i = (addr >> 3) & 0x3f;
1732
1733 /* ignore real translation entries */
1734 if (!(addr & TLB_UST1_IS_REAL_BIT)) {
1735 replace_tlb_entry(&env->dtlb[i], env->dmmu.tag_access,
1736 sun4v_tte_to_sun4u(env, addr, val), env);
1737 }
1738#ifdef DEBUG_MMU
1739 DPRINTF_MMU("dmmu data access replaced entry [%i]\n", i);
1740 dump_mmu(env);
1741#endif
1742 return;
1743 }
1744 case ASI_DMMU_DEMAP: /* D-MMU demap */
1745 demap_tlb(env->dtlb, addr, "dmmu", env);
1746 return;
1747 case ASI_INTR_RECEIVE: /* Interrupt data receive */
1748 env->ivec_status = val & 0x20;
1749 return;
1750 case ASI_SCRATCHPAD: /* UA2005 privileged scratchpad */
1751 if (unlikely((addr >= 0x20) && (addr < 0x30))) {
1752 /* Hyperprivileged access only */
1753 cpu_unassigned_access(cs, addr, true, false, 1, size);
1754 }
1755 /* fall through */
1756 case ASI_HYP_SCRATCHPAD: /* UA2005 hyperprivileged scratchpad */
1757 {
1758 unsigned int i = (addr >> 3) & 0x7;
1759 env->scratch[i] = val;
1760 return;
1761 }
1762 case ASI_MMU: /* UA2005 Context ID registers */
1763 {
1764 switch ((addr >> 3) & 0x3) {
1765 case 1:
1766 env->dmmu.mmu_primary_context = val;
1767 env->immu.mmu_primary_context = val;
1768 tlb_flush_by_mmuidx(cs,
1769 (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX));
1770 break;
1771 case 2:
1772 env->dmmu.mmu_secondary_context = val;
1773 env->immu.mmu_secondary_context = val;
1774 tlb_flush_by_mmuidx(cs,
1775 (1 << MMU_USER_SECONDARY_IDX) |
1776 (1 << MMU_KERNEL_SECONDARY_IDX));
1777 break;
1778 default:
1779 cpu_unassigned_access(cs, addr, true, false, 1, size);
1780 }
1781 }
1782 return;
1783 case ASI_QUEUE: /* UA2005 CPU mondo queue */
1784 case ASI_DCACHE_DATA: /* D-cache data */
1785 case ASI_DCACHE_TAG: /* D-cache tag access */
1786 case ASI_ESTATE_ERROR_EN: /* E-cache error enable */
1787 case ASI_AFSR: /* E-cache asynchronous fault status */
1788 case ASI_AFAR: /* E-cache asynchronous fault address */
1789 case ASI_EC_TAG_DATA: /* E-cache tag data */
1790 case ASI_IC_INSTR: /* I-cache instruction access */
1791 case ASI_IC_TAG: /* I-cache tag access */
1792 case ASI_IC_PRE_DECODE: /* I-cache predecode */
1793 case ASI_IC_NEXT_FIELD: /* I-cache LRU etc. */
1794 case ASI_EC_W: /* E-cache tag */
1795 case ASI_EC_R: /* E-cache tag */
1796 return;
1797 case ASI_IMMU_TSB_8KB_PTR: /* I-MMU 8k TSB pointer, RO */
1798 case ASI_IMMU_TSB_64KB_PTR: /* I-MMU 64k TSB pointer, RO */
1799 case ASI_ITLB_TAG_READ: /* I-MMU tag read, RO */
1800 case ASI_DMMU_TSB_8KB_PTR: /* D-MMU 8k TSB pointer, RO */
1801 case ASI_DMMU_TSB_64KB_PTR: /* D-MMU 64k TSB pointer, RO */
1802 case ASI_DMMU_TSB_DIRECT_PTR: /* D-MMU data pointer, RO */
1803 case ASI_DTLB_TAG_READ: /* D-MMU tag read, RO */
1804 case ASI_INTR_DISPATCH_STAT: /* Interrupt dispatch, RO */
1805 case ASI_INTR_R: /* Incoming interrupt vector, RO */
1806 case ASI_PNF: /* Primary no-fault, RO */
1807 case ASI_SNF: /* Secondary no-fault, RO */
1808 case ASI_PNFL: /* Primary no-fault LE, RO */
1809 case ASI_SNFL: /* Secondary no-fault LE, RO */
1810 default:
1811 cpu_unassigned_access(cs, addr, true, false, 1, size);
1812 return;
1813 }
1814}
1815#endif /* CONFIG_USER_ONLY */
1816#endif /* TARGET_SPARC64 */
1817
1818#if !defined(CONFIG_USER_ONLY)
1819#ifndef TARGET_SPARC64
1820void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
1821 bool is_write, bool is_exec, int is_asi,
1822 unsigned size)
1823{
1824 SPARCCPU *cpu = SPARC_CPU(cs);
1825 CPUSPARCState *env = &cpu->env;
1826 int fault_type;
1827
1828#ifdef DEBUG_UNASSIGNED
1829 if (is_asi) {
1830 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
1831 " asi 0x%02x from " TARGET_FMT_lx "\n",
1832 is_exec ? "exec" : is_write ? "write" : "read", size,
1833 size == 1 ? "" : "s", addr, is_asi, env->pc);
1834 } else {
1835 printf("Unassigned mem %s access of %d byte%s to " TARGET_FMT_plx
1836 " from " TARGET_FMT_lx "\n",
1837 is_exec ? "exec" : is_write ? "write" : "read", size,
1838 size == 1 ? "" : "s", addr, env->pc);
1839 }
1840#endif
1841 /* Don't overwrite translation and access faults */
1842 fault_type = (env->mmuregs[3] & 0x1c) >> 2;
1843 if ((fault_type > 4) || (fault_type == 0)) {
1844 env->mmuregs[3] = 0; /* Fault status register */
1845 if (is_asi) {
1846 env->mmuregs[3] |= 1 << 16;
1847 }
1848 if (env->psrs) {
1849 env->mmuregs[3] |= 1 << 5;
1850 }
1851 if (is_exec) {
1852 env->mmuregs[3] |= 1 << 6;
1853 }
1854 if (is_write) {
1855 env->mmuregs[3] |= 1 << 7;
1856 }
1857 env->mmuregs[3] |= (5 << 2) | 2;
1858 /* SuperSPARC will never place instruction fault addresses in the FAR */
1859 if (!is_exec) {
1860 env->mmuregs[4] = addr; /* Fault address register */
1861 }
1862 }
1863 /* overflow (same type fault was not read before another fault) */
1864 if (fault_type == ((env->mmuregs[3] & 0x1c)) >> 2) {
1865 env->mmuregs[3] |= 1;
1866 }
1867
1868 if ((env->mmuregs[0] & MMU_E) && !(env->mmuregs[0] & MMU_NF)) {
1869 int tt = is_exec ? TT_CODE_ACCESS : TT_DATA_ACCESS;
1870 cpu_raise_exception_ra(env, tt, GETPC());
1871 }
1872
1873 /* flush neverland mappings created during no-fault mode,
1874 so the sequential MMU faults report proper fault types */
1875 if (env->mmuregs[0] & MMU_NF) {
1876 tlb_flush(cs);
1877 }
1878}
1879#else
1880void sparc_cpu_unassigned_access(CPUState *cs, hwaddr addr,
1881 bool is_write, bool is_exec, int is_asi,
1882 unsigned size)
1883{
1884 SPARCCPU *cpu = SPARC_CPU(cs);
1885 CPUSPARCState *env = &cpu->env;
1886
1887#ifdef DEBUG_UNASSIGNED
1888 printf("Unassigned mem access to " TARGET_FMT_plx " from " TARGET_FMT_lx
1889 "\n", addr, env->pc);
1890#endif
1891
1892 if (is_exec) { /* XXX has_hypervisor */
1893 if (env->lsu & (IMMU_E)) {
1894 cpu_raise_exception_ra(env, TT_CODE_ACCESS, GETPC());
1895 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
1896 cpu_raise_exception_ra(env, TT_INSN_REAL_TRANSLATION_MISS, GETPC());
1897 }
1898 } else {
1899 if (env->lsu & (DMMU_E)) {
1900 cpu_raise_exception_ra(env, TT_DATA_ACCESS, GETPC());
1901 } else if (cpu_has_hypervisor(env) && !(env->hpstate & HS_PRIV)) {
1902 cpu_raise_exception_ra(env, TT_DATA_REAL_TRANSLATION_MISS, GETPC());
1903 }
1904 }
1905}
1906#endif
1907#endif
1908
1909#if !defined(CONFIG_USER_ONLY)
1910void QEMU_NORETURN sparc_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
1911 MMUAccessType access_type,
1912 int mmu_idx,
1913 uintptr_t retaddr)
1914{
1915 SPARCCPU *cpu = SPARC_CPU(cs);
1916 CPUSPARCState *env = &cpu->env;
1917
1918#ifdef DEBUG_UNALIGNED
1919 printf("Unaligned access to 0x" TARGET_FMT_lx " from 0x" TARGET_FMT_lx
1920 "\n", addr, env->pc);
1921#endif
1922 cpu_raise_exception_ra(env, TT_UNALIGNED, retaddr);
1923}
1924#endif
1925