1 | /* |
2 | * Copyright (c) 2011 - 2019, Max Filippov, Open Source and Linux Lab. |
3 | * All rights reserved. |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions are met: |
7 | * * Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * * Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * * Neither the name of the Open Source and Linux Lab nor the |
13 | * names of its contributors may be used to endorse or promote products |
14 | * derived from this software without specific prior written permission. |
15 | * |
16 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
17 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
19 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY |
20 | * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
21 | * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; |
22 | * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
23 | * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
25 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | */ |
27 | |
28 | #include "qemu/osdep.h" |
29 | #include "qemu/main-loop.h" |
30 | #include "qemu/qemu-print.h" |
31 | #include "qemu/units.h" |
32 | #include "cpu.h" |
33 | #include "exec/helper-proto.h" |
34 | #include "qemu/host-utils.h" |
35 | #include "exec/exec-all.h" |
36 | #include "exec/cpu_ldst.h" |
37 | |
38 | #define XTENSA_MPU_SEGMENT_MASK 0x0000001f |
39 | #define XTENSA_MPU_ACC_RIGHTS_MASK 0x00000f00 |
40 | #define XTENSA_MPU_ACC_RIGHTS_SHIFT 8 |
41 | #define XTENSA_MPU_MEM_TYPE_MASK 0x001ff000 |
42 | #define XTENSA_MPU_MEM_TYPE_SHIFT 12 |
43 | #define XTENSA_MPU_ATTR_MASK 0x001fff00 |
44 | |
45 | #define XTENSA_MPU_PROBE_B 0x40000000 |
46 | #define XTENSA_MPU_PROBE_V 0x80000000 |
47 | |
48 | #define XTENSA_MPU_SYSTEM_TYPE_DEVICE 0x0001 |
49 | #define XTENSA_MPU_SYSTEM_TYPE_NC 0x0002 |
50 | #define XTENSA_MPU_SYSTEM_TYPE_C 0x0003 |
51 | #define XTENSA_MPU_SYSTEM_TYPE_MASK 0x0003 |
52 | |
53 | #define XTENSA_MPU_TYPE_SYS_C 0x0010 |
54 | #define XTENSA_MPU_TYPE_SYS_W 0x0020 |
55 | #define XTENSA_MPU_TYPE_SYS_R 0x0040 |
56 | #define XTENSA_MPU_TYPE_CPU_C 0x0100 |
57 | #define XTENSA_MPU_TYPE_CPU_W 0x0200 |
58 | #define XTENSA_MPU_TYPE_CPU_R 0x0400 |
59 | #define XTENSA_MPU_TYPE_CPU_CACHE 0x0800 |
60 | #define XTENSA_MPU_TYPE_B 0x1000 |
61 | #define XTENSA_MPU_TYPE_INT 0x2000 |
62 | |
63 | void HELPER(itlb_hit_test)(CPUXtensaState *env, uint32_t vaddr) |
64 | { |
65 | /* |
66 | * Attempt the memory load; we don't care about the result but |
 * only the side-effects (i.e. any MMU or other exception)
68 | */ |
69 | cpu_ldub_code_ra(env, vaddr, GETPC()); |
70 | } |
71 | |
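/*!
 * Write the RASID special register. The ring 0 ASID (bits 7:0) is
 * hardwired to 1; any change to the ring/ASID mapping invalidates
 * cached translations, so the QEMU TLB is flushed.
 */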
72 | void HELPER(wsr_rasid)(CPUXtensaState *env, uint32_t v) |
73 | { |
74 | v = (v & 0xffffff00) | 0x1; |
75 | if (v != env->sregs[RASID]) { |
76 | env->sregs[RASID] = v; |
77 | tlb_flush(env_cpu(env)); |
78 | } |
79 | } |
80 | |
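/*!
 * Get the page size code of a variable-page-size TLB way (4..6)
 * from the ITLBCFG/DTLBCFG register; fixed-size ways report 0.
 */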
81 | static uint32_t get_page_size(const CPUXtensaState *env, |
82 | bool dtlb, uint32_t way) |
83 | { |
84 | uint32_t tlbcfg = env->sregs[dtlb ? DTLBCFG : ITLBCFG]; |
85 | |
86 | switch (way) { |
87 | case 4: |
88 | return (tlbcfg >> 16) & 0x3; |
89 | |
90 | case 5: |
91 | return (tlbcfg >> 20) & 0x1; |
92 | |
93 | case 6: |
94 | return (tlbcfg >> 24) & 0x1; |
95 | |
96 | default: |
97 | return 0; |
98 | } |
99 | } |
100 | |
101 | /*! |
102 | * Get bit mask for the virtual address bits translated by the TLB way |
103 | */ |
104 | static uint32_t xtensa_tlb_get_addr_mask(const CPUXtensaState *env, |
105 | bool dtlb, uint32_t way) |
106 | { |
107 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
108 | bool varway56 = dtlb ? |
109 | env->config->dtlb.varway56 : |
110 | env->config->itlb.varway56; |
111 | |
112 | switch (way) { |
113 | case 4: |
114 | return 0xfff00000 << get_page_size(env, dtlb, way) * 2; |
115 | |
116 | case 5: |
117 | if (varway56) { |
118 | return 0xf8000000 << get_page_size(env, dtlb, way); |
119 | } else { |
120 | return 0xf8000000; |
121 | } |
122 | |
123 | case 6: |
124 | if (varway56) { |
125 | return 0xf0000000 << (1 - get_page_size(env, dtlb, way)); |
126 | } else { |
127 | return 0xf0000000; |
128 | } |
129 | |
130 | default: |
131 | return 0xfffff000; |
132 | } |
133 | } else { |
134 | return REGION_PAGE_MASK; |
135 | } |
136 | } |
137 | |
138 | /*! |
139 | * Get bit mask for the 'VPN without index' field. |
140 | * See ISA, 4.6.5.6, data format for RxTLB0 |
141 | */ |
142 | static uint32_t get_vpn_mask(const CPUXtensaState *env, bool dtlb, uint32_t way) |
143 | { |
144 | if (way < 4) { |
145 | bool is32 = (dtlb ? |
146 | env->config->dtlb.nrefillentries : |
147 | env->config->itlb.nrefillentries) == 32; |
148 | return is32 ? 0xffff8000 : 0xffffc000; |
149 | } else if (way == 4) { |
150 | return xtensa_tlb_get_addr_mask(env, dtlb, way) << 2; |
151 | } else if (way <= 6) { |
152 | uint32_t mask = xtensa_tlb_get_addr_mask(env, dtlb, way); |
153 | bool varway56 = dtlb ? |
154 | env->config->dtlb.varway56 : |
155 | env->config->itlb.varway56; |
156 | |
157 | if (varway56) { |
158 | return mask << (way == 5 ? 2 : 3); |
159 | } else { |
160 | return mask << 1; |
161 | } |
162 | } else { |
163 | return 0xfffff000; |
164 | } |
165 | } |
166 | |
167 | /*! |
168 | * Split virtual address into VPN (with index) and entry index |
169 | * for the given TLB way |
170 | */ |
171 | static void split_tlb_entry_spec_way(const CPUXtensaState *env, uint32_t v, |
172 | bool dtlb, uint32_t *vpn, |
173 | uint32_t wi, uint32_t *ei) |
174 | { |
175 | bool varway56 = dtlb ? |
176 | env->config->dtlb.varway56 : |
177 | env->config->itlb.varway56; |
178 | |
179 | if (!dtlb) { |
180 | wi &= 7; |
181 | } |
182 | |
183 | if (wi < 4) { |
184 | bool is32 = (dtlb ? |
185 | env->config->dtlb.nrefillentries : |
186 | env->config->itlb.nrefillentries) == 32; |
187 | *ei = (v >> 12) & (is32 ? 0x7 : 0x3); |
188 | } else { |
189 | switch (wi) { |
190 | case 4: |
191 | { |
192 | uint32_t eibase = 20 + get_page_size(env, dtlb, wi) * 2; |
193 | *ei = (v >> eibase) & 0x3; |
194 | } |
195 | break; |
196 | |
197 | case 5: |
198 | if (varway56) { |
199 | uint32_t eibase = 27 + get_page_size(env, dtlb, wi); |
200 | *ei = (v >> eibase) & 0x3; |
201 | } else { |
202 | *ei = (v >> 27) & 0x1; |
203 | } |
204 | break; |
205 | |
206 | case 6: |
207 | if (varway56) { |
208 | uint32_t eibase = 29 - get_page_size(env, dtlb, wi); |
209 | *ei = (v >> eibase) & 0x7; |
210 | } else { |
211 | *ei = (v >> 28) & 0x1; |
212 | } |
213 | break; |
214 | |
215 | default: |
216 | *ei = 0; |
217 | break; |
218 | } |
219 | } |
220 | *vpn = v & xtensa_tlb_get_addr_mask(env, dtlb, wi); |
221 | } |
222 | |
223 | /*! |
224 | * Split TLB address into TLB way, entry index and VPN (with index). |
225 | * See ISA, 4.6.5.5 - 4.6.5.8 for the TLB addressing format |
226 | */ |
227 | static void split_tlb_entry_spec(CPUXtensaState *env, uint32_t v, bool dtlb, |
228 | uint32_t *vpn, uint32_t *wi, uint32_t *ei) |
229 | { |
230 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
231 | *wi = v & (dtlb ? 0xf : 0x7); |
232 | split_tlb_entry_spec_way(env, v, dtlb, vpn, *wi, ei); |
233 | } else { |
234 | *vpn = v & REGION_PAGE_MASK; |
235 | *wi = 0; |
236 | *ei = (v >> 29) & 0x7; |
237 | } |
238 | } |
239 | |
240 | static xtensa_tlb_entry *xtensa_tlb_get_entry(CPUXtensaState *env, bool dtlb, |
241 | unsigned wi, unsigned ei) |
242 | { |
243 | return dtlb ? |
244 | env->dtlb[wi] + ei : |
245 | env->itlb[wi] + ei; |
246 | } |
247 | |
248 | static xtensa_tlb_entry *get_tlb_entry(CPUXtensaState *env, |
249 | uint32_t v, bool dtlb, uint32_t *pwi) |
250 | { |
251 | uint32_t vpn; |
252 | uint32_t wi; |
253 | uint32_t ei; |
254 | |
255 | split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei); |
256 | if (pwi) { |
257 | *pwi = wi; |
258 | } |
259 | return xtensa_tlb_get_entry(env, dtlb, wi, ei); |
260 | } |
261 | |
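/*!
 * Fill a TLB entry from a PTE: the PPN is masked by the way's address
 * mask, the ASID is the RASID byte selected by the PTE ring field
 * (bits 5:4), and the attributes come from the low 4 PTE bits.
 */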
262 | static void xtensa_tlb_set_entry_mmu(const CPUXtensaState *env, |
263 | xtensa_tlb_entry *entry, bool dtlb, |
264 | unsigned wi, unsigned ei, uint32_t vpn, |
265 | uint32_t pte) |
266 | { |
267 | entry->vaddr = vpn; |
268 | entry->paddr = pte & xtensa_tlb_get_addr_mask(env, dtlb, wi); |
269 | entry->asid = (env->sregs[RASID] >> ((pte >> 1) & 0x18)) & 0xff; |
270 | entry->attr = pte & 0xf; |
271 | } |
272 | |
273 | static void xtensa_tlb_set_entry(CPUXtensaState *env, bool dtlb, |
274 | unsigned wi, unsigned ei, |
275 | uint32_t vpn, uint32_t pte) |
276 | { |
277 | CPUState *cs = env_cpu(env); |
278 | xtensa_tlb_entry *entry = xtensa_tlb_get_entry(env, dtlb, wi, ei); |
279 | |
280 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
281 | if (entry->variable) { |
282 | if (entry->asid) { |
283 | tlb_flush_page(cs, entry->vaddr); |
284 | } |
285 | xtensa_tlb_set_entry_mmu(env, entry, dtlb, wi, ei, vpn, pte); |
286 | tlb_flush_page(cs, entry->vaddr); |
287 | } else { |
288 | qemu_log_mask(LOG_GUEST_ERROR, |
289 | "%s %d, %d, %d trying to set immutable entry\n" , |
290 | __func__, dtlb, wi, ei); |
291 | } |
292 | } else { |
293 | tlb_flush_page(cs, entry->vaddr); |
294 | if (xtensa_option_enabled(env->config, |
295 | XTENSA_OPTION_REGION_TRANSLATION)) { |
296 | entry->paddr = pte & REGION_PAGE_MASK; |
297 | } |
298 | entry->attr = pte & 0xf; |
299 | } |
300 | } |
301 | |
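/*!
 * Debugger address translation: try the address first as a data access,
 * then as an instruction fetch; return ~0 if neither translation succeeds.
 */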
302 | hwaddr xtensa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) |
303 | { |
304 | XtensaCPU *cpu = XTENSA_CPU(cs); |
305 | uint32_t paddr; |
306 | uint32_t page_size; |
307 | unsigned access; |
308 | |
309 | if (xtensa_get_physical_addr(&cpu->env, false, addr, 0, 0, |
310 | &paddr, &page_size, &access) == 0) { |
311 | return paddr; |
312 | } |
313 | if (xtensa_get_physical_addr(&cpu->env, false, addr, 2, 0, |
314 | &paddr, &page_size, &access) == 0) { |
315 | return paddr; |
316 | } |
317 | return ~0; |
318 | } |
319 | |
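/*!
 * Invalidate all TLB entries (ASID 0 never matches) and mark them
 * variable, i.e. modifiable by WITLB/WDTLB.
 */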
320 | static void reset_tlb_mmu_all_ways(CPUXtensaState *env, |
321 | const xtensa_tlb *tlb, |
322 | xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE]) |
323 | { |
324 | unsigned wi, ei; |
325 | |
326 | for (wi = 0; wi < tlb->nways; ++wi) { |
327 | for (ei = 0; ei < tlb->way_size[wi]; ++ei) { |
328 | entry[wi][ei].asid = 0; |
329 | entry[wi][ei].variable = true; |
330 | } |
331 | } |
332 | } |
333 | |
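/*!
 * Install the architecturally defined static mappings of ways 5 and 6
 * when those ways have a fixed page size, or an identity map of way 6
 * when the variable way 5/6 option is configured.
 */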
334 | static void reset_tlb_mmu_ways56(CPUXtensaState *env, |
335 | const xtensa_tlb *tlb, |
336 | xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE]) |
337 | { |
338 | if (!tlb->varway56) { |
339 | static const xtensa_tlb_entry way5[] = { |
340 | { |
341 | .vaddr = 0xd0000000, |
342 | .paddr = 0, |
343 | .asid = 1, |
344 | .attr = 7, |
345 | .variable = false, |
346 | }, { |
347 | .vaddr = 0xd8000000, |
348 | .paddr = 0, |
349 | .asid = 1, |
350 | .attr = 3, |
351 | .variable = false, |
352 | } |
353 | }; |
354 | static const xtensa_tlb_entry way6[] = { |
355 | { |
356 | .vaddr = 0xe0000000, |
357 | .paddr = 0xf0000000, |
358 | .asid = 1, |
359 | .attr = 7, |
360 | .variable = false, |
361 | }, { |
362 | .vaddr = 0xf0000000, |
363 | .paddr = 0xf0000000, |
364 | .asid = 1, |
365 | .attr = 3, |
366 | .variable = false, |
367 | } |
368 | }; |
369 | memcpy(entry[5], way5, sizeof(way5)); |
370 | memcpy(entry[6], way6, sizeof(way6)); |
371 | } else { |
372 | uint32_t ei; |
373 | for (ei = 0; ei < 8; ++ei) { |
374 | entry[6][ei].vaddr = ei << 29; |
375 | entry[6][ei].paddr = ei << 29; |
376 | entry[6][ei].asid = 1; |
377 | entry[6][ei].attr = 3; |
378 | } |
379 | } |
380 | } |
381 | |
382 | static void reset_tlb_region_way0(CPUXtensaState *env, |
383 | xtensa_tlb_entry entry[][MAX_TLB_WAY_SIZE]) |
384 | { |
385 | unsigned ei; |
386 | |
387 | for (ei = 0; ei < 8; ++ei) { |
388 | entry[0][ei].vaddr = ei << 29; |
389 | entry[0][ei].paddr = ei << 29; |
390 | entry[0][ei].asid = 1; |
391 | entry[0][ei].attr = 2; |
392 | entry[0][ei].variable = true; |
393 | } |
394 | } |
395 | |
396 | void reset_mmu(CPUXtensaState *env) |
397 | { |
398 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
399 | env->sregs[RASID] = 0x04030201; |
400 | env->sregs[ITLBCFG] = 0; |
401 | env->sregs[DTLBCFG] = 0; |
402 | env->autorefill_idx = 0; |
403 | reset_tlb_mmu_all_ways(env, &env->config->itlb, env->itlb); |
404 | reset_tlb_mmu_all_ways(env, &env->config->dtlb, env->dtlb); |
405 | reset_tlb_mmu_ways56(env, &env->config->itlb, env->itlb); |
406 | reset_tlb_mmu_ways56(env, &env->config->dtlb, env->dtlb); |
407 | } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) { |
408 | unsigned i; |
409 | |
410 | env->sregs[MPUENB] = 0; |
411 | env->sregs[MPUCFG] = env->config->n_mpu_fg_segments; |
412 | env->sregs[CACHEADRDIS] = 0; |
413 | assert(env->config->n_mpu_bg_segments > 0 && |
414 | env->config->mpu_bg[0].vaddr == 0); |
415 | for (i = 1; i < env->config->n_mpu_bg_segments; ++i) { |
416 | assert(env->config->mpu_bg[i].vaddr >= |
417 | env->config->mpu_bg[i - 1].vaddr); |
418 | } |
419 | } else { |
420 | env->sregs[CACHEATTR] = 0x22222222; |
421 | reset_tlb_region_way0(env, env->itlb); |
422 | reset_tlb_region_way0(env, env->dtlb); |
423 | } |
424 | } |
425 | |
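/*!
 * Map an ASID back to its ring by scanning the four ASID bytes of the
 * RASID register; return 0xff when the ASID is not assigned to any ring.
 */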
426 | static unsigned get_ring(const CPUXtensaState *env, uint8_t asid) |
427 | { |
428 | unsigned i; |
429 | for (i = 0; i < 4; ++i) { |
430 | if (((env->sregs[RASID] >> i * 8) & 0xff) == asid) { |
431 | return i; |
432 | } |
433 | } |
434 | return 0xff; |
435 | } |
436 | |
437 | /*! |
438 | * Lookup xtensa TLB for the given virtual address. |
439 | * See ISA, 4.6.2.2 |
440 | * |
441 | * \param pwi: [out] way index |
442 | * \param pei: [out] entry index |
443 | * \param pring: [out] access ring |
444 | * \return 0 if ok, exception cause code otherwise |
445 | */ |
446 | static int xtensa_tlb_lookup(const CPUXtensaState *env, |
447 | uint32_t addr, bool dtlb, |
448 | uint32_t *pwi, uint32_t *pei, uint8_t *pring) |
449 | { |
450 | const xtensa_tlb *tlb = dtlb ? |
451 | &env->config->dtlb : &env->config->itlb; |
452 | const xtensa_tlb_entry (*entry)[MAX_TLB_WAY_SIZE] = dtlb ? |
453 | env->dtlb : env->itlb; |
454 | |
455 | int nhits = 0; |
456 | unsigned wi; |
457 | |
458 | for (wi = 0; wi < tlb->nways; ++wi) { |
459 | uint32_t vpn; |
460 | uint32_t ei; |
461 | split_tlb_entry_spec_way(env, addr, dtlb, &vpn, wi, &ei); |
462 | if (entry[wi][ei].vaddr == vpn && entry[wi][ei].asid) { |
463 | unsigned ring = get_ring(env, entry[wi][ei].asid); |
464 | if (ring < 4) { |
465 | if (++nhits > 1) { |
466 | return dtlb ? |
467 | LOAD_STORE_TLB_MULTI_HIT_CAUSE : |
468 | INST_TLB_MULTI_HIT_CAUSE; |
469 | } |
470 | *pwi = wi; |
471 | *pei = ei; |
472 | *pring = ring; |
473 | } |
474 | } |
475 | } |
476 | return nhits ? 0 : |
477 | (dtlb ? LOAD_STORE_TLB_MISS_CAUSE : INST_TLB_MISS_CAUSE); |
478 | } |
479 | |
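/*!
 * Implementation of the RITLB0/RDTLB0 instructions: read the VPN and
 * ASID of a TLB entry (MMU), or echo the region (region protection).
 */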
480 | uint32_t HELPER(rtlb0)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) |
481 | { |
482 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
483 | uint32_t wi; |
484 | const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi); |
485 | return (entry->vaddr & get_vpn_mask(env, dtlb, wi)) | entry->asid; |
486 | } else { |
487 | return v & REGION_PAGE_MASK; |
488 | } |
489 | } |
490 | |
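/*!
 * Implementation of the RITLB1/RDTLB1 instructions: read the PPN and
 * attributes of a TLB entry.
 */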
491 | uint32_t HELPER(rtlb1)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) |
492 | { |
493 | const xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, NULL); |
494 | return entry->paddr | entry->attr; |
495 | } |
496 | |
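/*!
 * Implementation of the IITLB/IDTLB instructions: invalidate a single
 * variable TLB entry by clearing its ASID.
 */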
497 | void HELPER(itlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) |
498 | { |
499 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
500 | uint32_t wi; |
501 | xtensa_tlb_entry *entry = get_tlb_entry(env, v, dtlb, &wi); |
502 | if (entry->variable && entry->asid) { |
503 | tlb_flush_page(env_cpu(env), entry->vaddr); |
504 | entry->asid = 0; |
505 | } |
506 | } |
507 | } |
508 | |
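/*!
 * Implementation of the PITLB/PDTLB instructions: probe the TLB for a
 * virtual address. A single hit returns the way and a hit flag, a miss
 * returns 0, and a multi-hit raises an exception.
 */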
509 | uint32_t HELPER(ptlb)(CPUXtensaState *env, uint32_t v, uint32_t dtlb) |
510 | { |
511 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
512 | uint32_t wi; |
513 | uint32_t ei; |
514 | uint8_t ring; |
515 | int res = xtensa_tlb_lookup(env, v, dtlb, &wi, &ei, &ring); |
516 | |
517 | switch (res) { |
518 | case 0: |
519 | if (ring >= xtensa_get_ring(env)) { |
520 | return (v & 0xfffff000) | wi | (dtlb ? 0x10 : 0x8); |
521 | } |
522 | break; |
523 | |
524 | case INST_TLB_MULTI_HIT_CAUSE: |
525 | case LOAD_STORE_TLB_MULTI_HIT_CAUSE: |
526 | HELPER(exception_cause_vaddr)(env, env->pc, res, v); |
527 | break; |
528 | } |
529 | return 0; |
530 | } else { |
531 | return (v & REGION_PAGE_MASK) | 0x1; |
532 | } |
533 | } |
534 | |
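/*!
 * Implementation of the WITLB/WDTLB instructions: write PTE p into the
 * TLB entry selected by the entry specifier v.
 */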
535 | void HELPER(wtlb)(CPUXtensaState *env, uint32_t p, uint32_t v, uint32_t dtlb) |
536 | { |
537 | uint32_t vpn; |
538 | uint32_t wi; |
539 | uint32_t ei; |
540 | split_tlb_entry_spec(env, v, dtlb, &vpn, &wi, &ei); |
541 | xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, p); |
542 | } |
543 | |
544 | /*! |
545 | * Convert MMU ATTR to PAGE_{READ,WRITE,EXEC} mask. |
546 | * See ISA, 4.6.5.10 |
547 | */ |
548 | static unsigned mmu_attr_to_access(uint32_t attr) |
549 | { |
550 | unsigned access = 0; |
551 | |
552 | if (attr < 12) { |
553 | access |= PAGE_READ; |
554 | if (attr & 0x1) { |
555 | access |= PAGE_EXEC; |
556 | } |
557 | if (attr & 0x2) { |
558 | access |= PAGE_WRITE; |
559 | } |
560 | |
561 | switch (attr & 0xc) { |
562 | case 0: |
563 | access |= PAGE_CACHE_BYPASS; |
564 | break; |
565 | |
566 | case 4: |
567 | access |= PAGE_CACHE_WB; |
568 | break; |
569 | |
570 | case 8: |
571 | access |= PAGE_CACHE_WT; |
572 | break; |
573 | } |
574 | } else if (attr == 13) { |
575 | access |= PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE; |
576 | } |
577 | return access; |
578 | } |
579 | |
580 | /*! |
581 | * Convert region protection ATTR to PAGE_{READ,WRITE,EXEC} mask. |
582 | * See ISA, 4.6.3.3 |
583 | */ |
584 | static unsigned region_attr_to_access(uint32_t attr) |
585 | { |
586 | static const unsigned access[16] = { |
587 | [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT, |
588 | [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT, |
589 | [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS, |
590 | [3] = PAGE_EXEC | PAGE_CACHE_WB, |
591 | [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB, |
592 | [5] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB, |
593 | [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE, |
594 | }; |
595 | |
596 | return access[attr & 0xf]; |
597 | } |
598 | |
599 | /*! |
600 | * Convert cacheattr to PAGE_{READ,WRITE,EXEC} mask. |
601 | * See ISA, A.2.14 The Cache Attribute Register |
602 | */ |
603 | static unsigned cacheattr_attr_to_access(uint32_t attr) |
604 | { |
605 | static const unsigned access[16] = { |
606 | [0] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_WT, |
607 | [1] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WT, |
608 | [2] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_BYPASS, |
609 | [3] = PAGE_EXEC | PAGE_CACHE_WB, |
610 | [4] = PAGE_READ | PAGE_WRITE | PAGE_EXEC | PAGE_CACHE_WB, |
611 | [14] = PAGE_READ | PAGE_WRITE | PAGE_CACHE_ISOLATE, |
612 | }; |
613 | |
614 | return access[attr & 0xf]; |
615 | } |
616 | |
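/*
 * MPU memory types are decoded by matching the attribute field against
 * sets of mask/value patterns derived from the memory type encoding.
 */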
617 | struct attr_pattern { |
618 | uint32_t mask; |
619 | uint32_t value; |
620 | }; |
621 | |
622 | static int attr_pattern_match(uint32_t attr, |
623 | const struct attr_pattern *pattern, |
624 | size_t n) |
625 | { |
626 | size_t i; |
627 | |
628 | for (i = 0; i < n; ++i) { |
629 | if ((attr & pattern[i].mask) == pattern[i].value) { |
630 | return 1; |
631 | } |
632 | } |
633 | return 0; |
634 | } |
635 | |
636 | static unsigned mpu_attr_to_cpu_cache(uint32_t attr) |
637 | { |
638 | static const struct attr_pattern cpu_c[] = { |
639 | { .mask = 0x18f, .value = 0x089 }, |
640 | { .mask = 0x188, .value = 0x080 }, |
641 | { .mask = 0x180, .value = 0x180 }, |
642 | }; |
643 | |
644 | unsigned type = 0; |
645 | |
646 | if (attr_pattern_match(attr, cpu_c, ARRAY_SIZE(cpu_c))) { |
647 | type |= XTENSA_MPU_TYPE_CPU_CACHE; |
648 | if (attr & 0x10) { |
649 | type |= XTENSA_MPU_TYPE_CPU_C; |
650 | } |
651 | if (attr & 0x20) { |
652 | type |= XTENSA_MPU_TYPE_CPU_W; |
653 | } |
654 | if (attr & 0x40) { |
655 | type |= XTENSA_MPU_TYPE_CPU_R; |
656 | } |
657 | } |
658 | return type; |
659 | } |
660 | |
661 | static unsigned mpu_attr_to_type(uint32_t attr) |
662 | { |
663 | static const struct attr_pattern device_type[] = { |
664 | { .mask = 0x1f6, .value = 0x000 }, |
665 | { .mask = 0x1f6, .value = 0x006 }, |
666 | }; |
667 | static const struct attr_pattern sys_nc_type[] = { |
668 | { .mask = 0x1fe, .value = 0x018 }, |
669 | { .mask = 0x1fe, .value = 0x01e }, |
670 | { .mask = 0x18f, .value = 0x089 }, |
671 | }; |
672 | static const struct attr_pattern sys_c_type[] = { |
673 | { .mask = 0x1f8, .value = 0x010 }, |
674 | { .mask = 0x188, .value = 0x080 }, |
675 | { .mask = 0x1f0, .value = 0x030 }, |
676 | { .mask = 0x180, .value = 0x180 }, |
677 | }; |
678 | static const struct attr_pattern b[] = { |
679 | { .mask = 0x1f7, .value = 0x001 }, |
680 | { .mask = 0x1f7, .value = 0x007 }, |
681 | { .mask = 0x1ff, .value = 0x019 }, |
682 | { .mask = 0x1ff, .value = 0x01f }, |
683 | }; |
684 | |
685 | unsigned type = 0; |
686 | |
687 | attr = (attr & XTENSA_MPU_MEM_TYPE_MASK) >> XTENSA_MPU_MEM_TYPE_SHIFT; |
688 | if (attr_pattern_match(attr, device_type, ARRAY_SIZE(device_type))) { |
689 | type |= XTENSA_MPU_SYSTEM_TYPE_DEVICE; |
690 | if (attr & 0x80) { |
691 | type |= XTENSA_MPU_TYPE_INT; |
692 | } |
693 | } |
694 | if (attr_pattern_match(attr, sys_nc_type, ARRAY_SIZE(sys_nc_type))) { |
695 | type |= XTENSA_MPU_SYSTEM_TYPE_NC; |
696 | } |
697 | if (attr_pattern_match(attr, sys_c_type, ARRAY_SIZE(sys_c_type))) { |
698 | type |= XTENSA_MPU_SYSTEM_TYPE_C; |
699 | if (attr & 0x1) { |
700 | type |= XTENSA_MPU_TYPE_SYS_C; |
701 | } |
702 | if (attr & 0x2) { |
703 | type |= XTENSA_MPU_TYPE_SYS_W; |
704 | } |
705 | if (attr & 0x4) { |
706 | type |= XTENSA_MPU_TYPE_SYS_R; |
707 | } |
708 | } |
709 | if (attr_pattern_match(attr, b, ARRAY_SIZE(b))) { |
710 | type |= XTENSA_MPU_TYPE_B; |
711 | } |
712 | type |= mpu_attr_to_cpu_cache(attr); |
713 | |
714 | return type; |
715 | } |
716 | |
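/*!
 * Convert the MPU access rights field to a PAGE_{READ,WRITE,EXEC} mask
 * for the given ring, plus a cache attribute derived from the CPU cache
 * properties of the memory type.
 */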
717 | static unsigned mpu_attr_to_access(uint32_t attr, unsigned ring) |
718 | { |
719 | static const unsigned access[2][16] = { |
720 | [0] = { |
721 | [4] = PAGE_READ, |
722 | [5] = PAGE_READ | PAGE_EXEC, |
723 | [6] = PAGE_READ | PAGE_WRITE, |
724 | [7] = PAGE_READ | PAGE_WRITE | PAGE_EXEC, |
725 | [8] = PAGE_WRITE, |
726 | [9] = PAGE_READ | PAGE_WRITE, |
727 | [10] = PAGE_READ | PAGE_WRITE, |
728 | [11] = PAGE_READ | PAGE_WRITE | PAGE_EXEC, |
729 | [12] = PAGE_READ, |
730 | [13] = PAGE_READ | PAGE_EXEC, |
731 | [14] = PAGE_READ | PAGE_WRITE, |
732 | [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC, |
733 | }, |
734 | [1] = { |
735 | [8] = PAGE_WRITE, |
736 | [9] = PAGE_READ | PAGE_WRITE | PAGE_EXEC, |
737 | [10] = PAGE_READ, |
738 | [11] = PAGE_READ | PAGE_EXEC, |
739 | [12] = PAGE_READ, |
740 | [13] = PAGE_READ | PAGE_EXEC, |
741 | [14] = PAGE_READ | PAGE_WRITE, |
742 | [15] = PAGE_READ | PAGE_WRITE | PAGE_EXEC, |
743 | }, |
744 | }; |
745 | unsigned rv; |
746 | unsigned type; |
747 | |
748 | type = mpu_attr_to_cpu_cache(attr); |
749 | rv = access[ring != 0][(attr & XTENSA_MPU_ACC_RIGHTS_MASK) >> |
750 | XTENSA_MPU_ACC_RIGHTS_SHIFT]; |
751 | |
752 | if (type & XTENSA_MPU_TYPE_CPU_CACHE) { |
753 | rv |= (type & XTENSA_MPU_TYPE_CPU_C) ? PAGE_CACHE_WB : PAGE_CACHE_WT; |
754 | } else { |
755 | rv |= PAGE_CACHE_BYPASS; |
756 | } |
757 | return rv; |
758 | } |
759 | |
760 | static bool is_access_granted(unsigned access, int is_write) |
761 | { |
762 | switch (is_write) { |
763 | case 0: |
764 | return access & PAGE_READ; |
765 | |
766 | case 1: |
767 | return access & PAGE_WRITE; |
768 | |
769 | case 2: |
770 | return access & PAGE_EXEC; |
771 | |
772 | default: |
773 | return 0; |
774 | } |
775 | } |
776 | |
777 | static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte); |
778 | |
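/*!
 * MMU address translation: look up the TLB, autorefill it from the page
 * table on a miss when permitted, then check the ring privilege and the
 * access rights. Returns 0 on success, an exception cause code otherwise.
 */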
779 | static int get_physical_addr_mmu(CPUXtensaState *env, bool update_tlb, |
780 | uint32_t vaddr, int is_write, int mmu_idx, |
781 | uint32_t *paddr, uint32_t *page_size, |
782 | unsigned *access, bool may_lookup_pt) |
783 | { |
784 | bool dtlb = is_write != 2; |
785 | uint32_t wi; |
786 | uint32_t ei; |
787 | uint8_t ring; |
788 | uint32_t vpn; |
789 | uint32_t pte; |
790 | const xtensa_tlb_entry *entry = NULL; |
791 | xtensa_tlb_entry tmp_entry; |
792 | int ret = xtensa_tlb_lookup(env, vaddr, dtlb, &wi, &ei, &ring); |
793 | |
794 | if ((ret == INST_TLB_MISS_CAUSE || ret == LOAD_STORE_TLB_MISS_CAUSE) && |
795 | may_lookup_pt && get_pte(env, vaddr, &pte)) { |
796 | ring = (pte >> 4) & 0x3; |
797 | wi = 0; |
798 | split_tlb_entry_spec_way(env, vaddr, dtlb, &vpn, wi, &ei); |
799 | |
800 | if (update_tlb) { |
801 | wi = ++env->autorefill_idx & 0x3; |
802 | xtensa_tlb_set_entry(env, dtlb, wi, ei, vpn, pte); |
803 | env->sregs[EXCVADDR] = vaddr; |
804 | qemu_log_mask(CPU_LOG_MMU, "%s: autorefill(%08x): %08x -> %08x\n" , |
805 | __func__, vaddr, vpn, pte); |
806 | } else { |
807 | xtensa_tlb_set_entry_mmu(env, &tmp_entry, dtlb, wi, ei, vpn, pte); |
808 | entry = &tmp_entry; |
809 | } |
810 | ret = 0; |
811 | } |
812 | if (ret != 0) { |
813 | return ret; |
814 | } |
815 | |
816 | if (entry == NULL) { |
817 | entry = xtensa_tlb_get_entry(env, dtlb, wi, ei); |
818 | } |
819 | |
820 | if (ring < mmu_idx) { |
821 | return dtlb ? |
822 | LOAD_STORE_PRIVILEGE_CAUSE : |
823 | INST_FETCH_PRIVILEGE_CAUSE; |
824 | } |
825 | |
826 | *access = mmu_attr_to_access(entry->attr) & |
827 | ~(dtlb ? PAGE_EXEC : PAGE_READ | PAGE_WRITE); |
828 | if (!is_access_granted(*access, is_write)) { |
829 | return dtlb ? |
830 | (is_write ? |
831 | STORE_PROHIBITED_CAUSE : |
832 | LOAD_PROHIBITED_CAUSE) : |
833 | INST_FETCH_PROHIBITED_CAUSE; |
834 | } |
835 | |
836 | *paddr = entry->paddr | (vaddr & ~xtensa_tlb_get_addr_mask(env, dtlb, wi)); |
837 | *page_size = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1; |
838 | |
839 | return 0; |
840 | } |
841 | |
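/*!
 * Look up the PTE for vaddr during autorefill: form the PTE virtual
 * address from PTEVADDR and the VPN, translate it without touching the
 * TLB, and load the PTE from physical memory. Returns true on success.
 */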
842 | static bool get_pte(CPUXtensaState *env, uint32_t vaddr, uint32_t *pte) |
843 | { |
844 | CPUState *cs = env_cpu(env); |
845 | uint32_t paddr; |
846 | uint32_t page_size; |
847 | unsigned access; |
848 | uint32_t pt_vaddr = |
849 | (env->sregs[PTEVADDR] | (vaddr >> 10)) & 0xfffffffc; |
850 | int ret = get_physical_addr_mmu(env, false, pt_vaddr, 0, 0, |
851 | &paddr, &page_size, &access, false); |
852 | |
853 | if (ret == 0) { |
854 | qemu_log_mask(CPU_LOG_MMU, |
855 | "%s: autorefill(%08x): PTE va = %08x, pa = %08x\n" , |
856 | __func__, vaddr, pt_vaddr, paddr); |
857 | } else { |
858 | qemu_log_mask(CPU_LOG_MMU, |
859 | "%s: autorefill(%08x): PTE va = %08x, failed (%d)\n" , |
860 | __func__, vaddr, pt_vaddr, ret); |
861 | } |
862 | |
863 | if (ret == 0) { |
864 | MemTxResult result; |
865 | |
866 | *pte = address_space_ldl(cs->as, paddr, MEMTXATTRS_UNSPECIFIED, |
867 | &result); |
868 | if (result != MEMTX_OK) { |
869 | qemu_log_mask(CPU_LOG_MMU, |
870 | "%s: couldn't load PTE: transaction failed (%u)\n" , |
871 | __func__, (unsigned)result); |
872 | ret = 1; |
873 | } |
874 | } |
875 | return ret == 0; |
876 | } |
877 | |
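/*!
 * Region protection/translation address translation: the region index is
 * the top 3 bits of the virtual address; check its access rights.
 */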
878 | static int get_physical_addr_region(CPUXtensaState *env, |
879 | uint32_t vaddr, int is_write, int mmu_idx, |
880 | uint32_t *paddr, uint32_t *page_size, |
881 | unsigned *access) |
882 | { |
883 | bool dtlb = is_write != 2; |
884 | uint32_t wi = 0; |
885 | uint32_t ei = (vaddr >> 29) & 0x7; |
886 | const xtensa_tlb_entry *entry = |
887 | xtensa_tlb_get_entry(env, dtlb, wi, ei); |
888 | |
889 | *access = region_attr_to_access(entry->attr); |
890 | if (!is_access_granted(*access, is_write)) { |
891 | return dtlb ? |
892 | (is_write ? |
893 | STORE_PROHIBITED_CAUSE : |
894 | LOAD_PROHIBITED_CAUSE) : |
895 | INST_FETCH_PROHIBITED_CAUSE; |
896 | } |
897 | |
898 | *paddr = entry->paddr | (vaddr & ~REGION_PAGE_MASK); |
899 | *page_size = ~REGION_PAGE_MASK + 1; |
900 | |
901 | return 0; |
902 | } |
903 | |
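/*!
 * Find the MPU segment covering vaddr. Entries are sorted by start
 * address, so segment i covers [entry[i].vaddr, entry[i + 1].vaddr).
 * Returns the number of matches; more than one indicates an overlap.
 */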
904 | static int xtensa_mpu_lookup(const xtensa_mpu_entry *entry, unsigned n, |
905 | uint32_t vaddr, unsigned *segment) |
906 | { |
907 | unsigned nhits = 0; |
908 | unsigned i; |
909 | |
910 | for (i = 0; i < n; ++i) { |
911 | if (vaddr >= entry[i].vaddr && |
912 | (i == n - 1 || vaddr < entry[i + 1].vaddr)) { |
913 | if (nhits++) { |
914 | break; |
915 | } |
916 | *segment = i; |
917 | } |
918 | } |
919 | return nhits; |
920 | } |
921 | |
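/*!
 * Write the MPUENB special register: keep only the bits for implemented
 * foreground segments and flush the QEMU TLB on any change.
 */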
922 | void HELPER(wsr_mpuenb)(CPUXtensaState *env, uint32_t v) |
923 | { |
924 | v &= (2u << (env->config->n_mpu_fg_segments - 1)) - 1; |
925 | |
926 | if (v != env->sregs[MPUENB]) { |
927 | env->sregs[MPUENB] = v; |
928 | tlb_flush(env_cpu(env)); |
929 | } |
930 | } |
931 | |
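/*!
 * Implementation of the WPTLB instruction: set a foreground segment's
 * start address, attributes and enable bit, then flush the QEMU TLB.
 */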
932 | void HELPER(wptlb)(CPUXtensaState *env, uint32_t p, uint32_t v) |
933 | { |
934 | unsigned segment = p & XTENSA_MPU_SEGMENT_MASK; |
935 | |
936 | if (segment < env->config->n_mpu_fg_segments) { |
937 | env->mpu_fg[segment].vaddr = v & -env->config->mpu_align; |
938 | env->mpu_fg[segment].attr = p & XTENSA_MPU_ATTR_MASK; |
939 | env->sregs[MPUENB] = deposit32(env->sregs[MPUENB], segment, 1, v); |
940 | tlb_flush(env_cpu(env)); |
941 | } |
942 | } |
943 | |
944 | uint32_t HELPER(rptlb0)(CPUXtensaState *env, uint32_t s) |
945 | { |
946 | unsigned segment = s & XTENSA_MPU_SEGMENT_MASK; |
947 | |
948 | if (segment < env->config->n_mpu_fg_segments) { |
949 | return env->mpu_fg[segment].vaddr | |
950 | extract32(env->sregs[MPUENB], segment, 1); |
951 | } else { |
952 | return 0; |
953 | } |
954 | } |
955 | |
956 | uint32_t HELPER(rptlb1)(CPUXtensaState *env, uint32_t s) |
957 | { |
958 | unsigned segment = s & XTENSA_MPU_SEGMENT_MASK; |
959 | |
960 | if (segment < env->config->n_mpu_fg_segments) { |
961 | return env->mpu_fg[segment].attr; |
962 | } else { |
963 | return 0; |
964 | } |
965 | } |
966 | |
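/*!
 * Implementation of the PPTLB instruction: probe the MPU for a virtual
 * address. Returns the matching enabled foreground segment with the
 * valid bit set, otherwise the background segment's attributes; raises
 * an exception on overlapping foreground segments.
 */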
967 | uint32_t HELPER(pptlb)(CPUXtensaState *env, uint32_t v) |
968 | { |
969 | unsigned nhits; |
970 | unsigned segment = XTENSA_MPU_PROBE_B; |
971 | unsigned bg_segment; |
972 | |
973 | nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments, |
974 | v, &segment); |
975 | if (nhits > 1) { |
976 | HELPER(exception_cause_vaddr)(env, env->pc, |
977 | LOAD_STORE_TLB_MULTI_HIT_CAUSE, v); |
978 | } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) { |
979 | return env->mpu_fg[segment].attr | segment | XTENSA_MPU_PROBE_V; |
980 | } else { |
981 | xtensa_mpu_lookup(env->config->mpu_bg, |
982 | env->config->n_mpu_bg_segments, |
983 | v, &bg_segment); |
984 | return env->config->mpu_bg[bg_segment].attr | segment; |
985 | } |
986 | } |
987 | |
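/*!
 * MPU address translation: find the covering foreground segment (falling
 * back to the background map) and check its access rights for the ring.
 */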
988 | static int get_physical_addr_mpu(CPUXtensaState *env, |
989 | uint32_t vaddr, int is_write, int mmu_idx, |
990 | uint32_t *paddr, uint32_t *page_size, |
991 | unsigned *access) |
992 | { |
993 | unsigned nhits; |
994 | unsigned segment; |
995 | uint32_t attr; |
996 | |
997 | nhits = xtensa_mpu_lookup(env->mpu_fg, env->config->n_mpu_fg_segments, |
998 | vaddr, &segment); |
999 | if (nhits > 1) { |
1000 | return is_write < 2 ? |
1001 | LOAD_STORE_TLB_MULTI_HIT_CAUSE : |
1002 | INST_TLB_MULTI_HIT_CAUSE; |
1003 | } else if (nhits == 1 && (env->sregs[MPUENB] & (1u << segment))) { |
1004 | attr = env->mpu_fg[segment].attr; |
1005 | } else { |
1006 | xtensa_mpu_lookup(env->config->mpu_bg, |
1007 | env->config->n_mpu_bg_segments, |
1008 | vaddr, &segment); |
1009 | attr = env->config->mpu_bg[segment].attr; |
1010 | } |
1011 | |
1012 | *access = mpu_attr_to_access(attr, mmu_idx); |
1013 | if (!is_access_granted(*access, is_write)) { |
1014 | return is_write < 2 ? |
1015 | (is_write ? |
1016 | STORE_PROHIBITED_CAUSE : |
1017 | LOAD_PROHIBITED_CAUSE) : |
1018 | INST_FETCH_PROHIBITED_CAUSE; |
1019 | } |
1020 | *paddr = vaddr; |
1021 | *page_size = env->config->mpu_align; |
1022 | return 0; |
1023 | } |
1024 | |
1025 | /*! |
 * Convert a virtual address to a physical address.
 * The MMU may perform a page-table walk and update the autorefill TLB
 * way entry as a side effect.
1028 | * |
1029 | * \return 0 if ok, exception cause code otherwise |
1030 | */ |
1031 | int xtensa_get_physical_addr(CPUXtensaState *env, bool update_tlb, |
1032 | uint32_t vaddr, int is_write, int mmu_idx, |
1033 | uint32_t *paddr, uint32_t *page_size, |
1034 | unsigned *access) |
1035 | { |
1036 | if (xtensa_option_enabled(env->config, XTENSA_OPTION_MMU)) { |
1037 | return get_physical_addr_mmu(env, update_tlb, |
1038 | vaddr, is_write, mmu_idx, paddr, |
1039 | page_size, access, true); |
1040 | } else if (xtensa_option_bits_enabled(env->config, |
1041 | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | |
1042 | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION))) { |
1043 | return get_physical_addr_region(env, vaddr, is_write, mmu_idx, |
1044 | paddr, page_size, access); |
1045 | } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) { |
1046 | return get_physical_addr_mpu(env, vaddr, is_write, mmu_idx, |
1047 | paddr, page_size, access); |
1048 | } else { |
1049 | *paddr = vaddr; |
1050 | *page_size = TARGET_PAGE_SIZE; |
1051 | *access = cacheattr_attr_to_access(env->sregs[CACHEATTR] >> |
1052 | ((vaddr & 0xe0000000) >> 27)); |
1053 | return 0; |
1054 | } |
1055 | } |
1056 | |
1057 | static void dump_tlb(CPUXtensaState *env, bool dtlb) |
1058 | { |
1059 | unsigned wi, ei; |
1060 | const xtensa_tlb *conf = |
1061 | dtlb ? &env->config->dtlb : &env->config->itlb; |
1062 | unsigned (*attr_to_access)(uint32_t) = |
1063 | xtensa_option_enabled(env->config, XTENSA_OPTION_MMU) ? |
1064 | mmu_attr_to_access : region_attr_to_access; |
1065 | |
1066 | for (wi = 0; wi < conf->nways; ++wi) { |
1067 | uint32_t sz = ~xtensa_tlb_get_addr_mask(env, dtlb, wi) + 1; |
1068 | const char *sz_text; |
        bool print_header = true;
1070 | |
1071 | if (sz >= 0x100000) { |
1072 | sz /= MiB; |
1073 | sz_text = "MB" ; |
1074 | } else { |
1075 | sz /= KiB; |
1076 | sz_text = "KB" ; |
1077 | } |
1078 | |
1079 | for (ei = 0; ei < conf->way_size[wi]; ++ei) { |
1080 | const xtensa_tlb_entry *entry = |
1081 | xtensa_tlb_get_entry(env, dtlb, wi, ei); |
1082 | |
1083 | if (entry->asid) { |
1084 | static const char * const cache_text[8] = { |
                    [PAGE_CACHE_BYPASS >> PAGE_CACHE_SHIFT] = "Bypass",
                    [PAGE_CACHE_WT >> PAGE_CACHE_SHIFT] = "WT",
                    [PAGE_CACHE_WB >> PAGE_CACHE_SHIFT] = "WB",
                    [PAGE_CACHE_ISOLATE >> PAGE_CACHE_SHIFT] = "Isolate",
1089 | }; |
1090 | unsigned access = attr_to_access(entry->attr); |
1091 | unsigned cache_idx = (access & PAGE_CACHE_MASK) >> |
1092 | PAGE_CACHE_SHIFT; |
1093 | |
1094 | if (print_header) { |
1095 | print_header = false; |
1096 | qemu_printf("Way %u (%d %s)\n" , wi, sz, sz_text); |
1097 | qemu_printf("\tVaddr Paddr ASID Attr RWX Cache\n" |
1098 | "\t---------- ---------- ---- ---- --- -------\n" ); |
1099 | } |
1100 | qemu_printf("\t0x%08x 0x%08x 0x%02x 0x%02x %c%c%c %-7s\n" , |
1101 | entry->vaddr, |
1102 | entry->paddr, |
1103 | entry->asid, |
1104 | entry->attr, |
1105 | (access & PAGE_READ) ? 'R' : '-', |
1106 | (access & PAGE_WRITE) ? 'W' : '-', |
1107 | (access & PAGE_EXEC) ? 'X' : '-', |
1108 | cache_text[cache_idx] ? |
1109 | cache_text[cache_idx] : "Invalid" ); |
1110 | } |
1111 | } |
1112 | } |
1113 | } |
1114 | |
1115 | static void dump_mpu(CPUXtensaState *env, |
1116 | const xtensa_mpu_entry *entry, unsigned n) |
1117 | { |
1118 | unsigned i; |
1119 | |
1120 | qemu_printf("\t%s Vaddr Attr Ring0 Ring1 System Type CPU cache\n" |
1121 | "\t%s ---------- ---------- ----- ----- ------------- ---------\n" , |
1122 | env ? "En" : " " , |
1123 | env ? "--" : " " ); |
1124 | |
1125 | for (i = 0; i < n; ++i) { |
1126 | uint32_t attr = entry[i].attr; |
1127 | unsigned access0 = mpu_attr_to_access(attr, 0); |
1128 | unsigned access1 = mpu_attr_to_access(attr, 1); |
1129 | unsigned type = mpu_attr_to_type(attr); |
1130 | char cpu_cache = (type & XTENSA_MPU_TYPE_CPU_CACHE) ? '-' : ' '; |
1131 | |
1132 | qemu_printf("\t %c 0x%08x 0x%08x %c%c%c %c%c%c " , |
1133 | env ? |
1134 | ((env->sregs[MPUENB] & (1u << i)) ? '+' : '-') : ' ', |
1135 | entry[i].vaddr, attr, |
1136 | (access0 & PAGE_READ) ? 'R' : '-', |
1137 | (access0 & PAGE_WRITE) ? 'W' : '-', |
1138 | (access0 & PAGE_EXEC) ? 'X' : '-', |
1139 | (access1 & PAGE_READ) ? 'R' : '-', |
1140 | (access1 & PAGE_WRITE) ? 'W' : '-', |
1141 | (access1 & PAGE_EXEC) ? 'X' : '-'); |
1142 | |
1143 | switch (type & XTENSA_MPU_SYSTEM_TYPE_MASK) { |
1144 | case XTENSA_MPU_SYSTEM_TYPE_DEVICE: |
1145 | qemu_printf("Device %cB %3s\n" , |
1146 | (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n', |
1147 | (type & XTENSA_MPU_TYPE_INT) ? "int" : "" ); |
1148 | break; |
1149 | case XTENSA_MPU_SYSTEM_TYPE_NC: |
1150 | qemu_printf("Sys NC %cB %c%c%c\n" , |
1151 | (type & XTENSA_MPU_TYPE_B) ? ' ' : 'n', |
1152 | (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache, |
1153 | (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache, |
1154 | (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache); |
1155 | break; |
1156 | case XTENSA_MPU_SYSTEM_TYPE_C: |
1157 | qemu_printf("Sys C %c%c%c %c%c%c\n" , |
1158 | (type & XTENSA_MPU_TYPE_SYS_R) ? 'R' : '-', |
1159 | (type & XTENSA_MPU_TYPE_SYS_W) ? 'W' : '-', |
1160 | (type & XTENSA_MPU_TYPE_SYS_C) ? 'C' : '-', |
1161 | (type & XTENSA_MPU_TYPE_CPU_R) ? 'r' : cpu_cache, |
1162 | (type & XTENSA_MPU_TYPE_CPU_W) ? 'w' : cpu_cache, |
1163 | (type & XTENSA_MPU_TYPE_CPU_C) ? 'c' : cpu_cache); |
1164 | break; |
1165 | default: |
1166 | qemu_printf("Unknown\n" ); |
1167 | break; |
1168 | } |
1169 | } |
1170 | } |
1171 | |
1172 | void dump_mmu(CPUXtensaState *env) |
1173 | { |
1174 | if (xtensa_option_bits_enabled(env->config, |
1175 | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_PROTECTION) | |
1176 | XTENSA_OPTION_BIT(XTENSA_OPTION_REGION_TRANSLATION) | |
1177 | XTENSA_OPTION_BIT(XTENSA_OPTION_MMU))) { |
1178 | |
1179 | qemu_printf("ITLB:\n" ); |
1180 | dump_tlb(env, false); |
1181 | qemu_printf("\nDTLB:\n" ); |
1182 | dump_tlb(env, true); |
1183 | } else if (xtensa_option_enabled(env->config, XTENSA_OPTION_MPU)) { |
1184 | qemu_printf("Foreground map:\n" ); |
1185 | dump_mpu(env, env->mpu_fg, env->config->n_mpu_fg_segments); |
1186 | qemu_printf("\nBackground map:\n" ); |
1187 | dump_mpu(NULL, env->config->mpu_bg, env->config->n_mpu_bg_segments); |
1188 | } else { |
1189 | qemu_printf("No TLB for this CPU core\n" ); |
1190 | } |
1191 | } |
1192 | |