1 | /* |
2 | * Copyright (C) 2014-2016 Broadcom Corporation |
3 | * Copyright (c) 2017 Red Hat, Inc. |
4 | * Written by Prem Mallappa, Eric Auger |
5 | * |
6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. |
9 | * |
10 | * This program is distributed in the hope that it will be useful, |
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
13 | * GNU General Public License for more details. |
14 | * |
15 | * You should have received a copy of the GNU General Public License along |
16 | * with this program; if not, see <http://www.gnu.org/licenses/>. |
17 | */ |
18 | |
19 | #include "qemu/osdep.h" |
20 | #include "hw/irq.h" |
21 | #include "hw/sysbus.h" |
22 | #include "migration/vmstate.h" |
23 | #include "hw/qdev-core.h" |
24 | #include "hw/pci/pci.h" |
25 | #include "exec/address-spaces.h" |
26 | #include "cpu.h" |
27 | #include "trace.h" |
28 | #include "qemu/log.h" |
29 | #include "qemu/error-report.h" |
30 | #include "qapi/error.h" |
31 | |
32 | #include "hw/arm/smmuv3.h" |
33 | #include "smmuv3-internal.h" |
34 | |
35 | /** |
36 | * smmuv3_trigger_irq - pulse @irq if enabled and update |
37 | * GERROR register in case of GERROR interrupt |
38 | * |
 * @s: smmuv3 state handle
 * @irq: irq type
40 | * @gerror_mask: mask of gerrors to toggle (relevant if @irq is GERROR) |
41 | */ |
42 | static void smmuv3_trigger_irq(SMMUv3State *s, SMMUIrq irq, |
43 | uint32_t gerror_mask) |
44 | { |
45 | |
46 | bool pulse = false; |
47 | |
48 | switch (irq) { |
49 | case SMMU_IRQ_EVTQ: |
50 | pulse = smmuv3_eventq_irq_enabled(s); |
51 | break; |
52 | case SMMU_IRQ_PRIQ: |
53 | qemu_log_mask(LOG_UNIMP, "PRI not yet supported\n" ); |
54 | break; |
55 | case SMMU_IRQ_CMD_SYNC: |
56 | pulse = true; |
57 | break; |
58 | case SMMU_IRQ_GERROR: |
59 | { |
60 | uint32_t pending = s->gerror ^ s->gerrorn; |
61 | uint32_t new_gerrors = ~pending & gerror_mask; |
62 | |
63 | if (!new_gerrors) { |
            /* only toggle non-pending errors */
65 | return; |
66 | } |
67 | s->gerror ^= new_gerrors; |
68 | trace_smmuv3_write_gerror(new_gerrors, s->gerror); |
69 | |
70 | pulse = smmuv3_gerror_irq_enabled(s); |
71 | break; |
72 | } |
73 | } |
74 | if (pulse) { |
75 | trace_smmuv3_trigger_irq(irq); |
76 | qemu_irq_pulse(s->irq[irq]); |
77 | } |
78 | } |
79 | |
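/*
 * smmuv3_write_gerrorn - MMIO write handler for GERRORN. The guest
 * acknowledges active errors by toggling the corresponding GERRORN
 * bits back to match GERROR.
 */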
80 | static void smmuv3_write_gerrorn(SMMUv3State *s, uint32_t new_gerrorn) |
81 | { |
82 | uint32_t pending = s->gerror ^ s->gerrorn; |
83 | uint32_t toggled = s->gerrorn ^ new_gerrorn; |
84 | |
85 | if (toggled & ~pending) { |
86 | qemu_log_mask(LOG_GUEST_ERROR, |
87 | "guest toggles non pending errors = 0x%x\n" , |
88 | toggled & ~pending); |
89 | } |
90 | |
91 | /* |
92 | * We do not raise any error in case guest toggles bits corresponding |
93 | * to not active IRQs (CONSTRAINED UNPREDICTABLE) |
94 | */ |
95 | s->gerrorn = new_gerrorn; |
96 | |
97 | trace_smmuv3_write_gerrorn(toggled & pending, s->gerrorn); |
98 | } |
99 | |
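/*
 * Queue helpers: Q_CONS_ENTRY()/Q_PROD_ENTRY() (see smmuv3-internal.h)
 * yield the guest physical address of the entry at the current
 * consumer/producer index of @q.
 */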
100 | static inline MemTxResult queue_read(SMMUQueue *q, void *data) |
101 | { |
102 | dma_addr_t addr = Q_CONS_ENTRY(q); |
103 | |
104 | return dma_memory_read(&address_space_memory, addr, data, q->entry_size); |
105 | } |
106 | |
107 | static MemTxResult queue_write(SMMUQueue *q, void *data) |
108 | { |
109 | dma_addr_t addr = Q_PROD_ENTRY(q); |
110 | MemTxResult ret; |
111 | |
112 | ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size); |
113 | if (ret != MEMTX_OK) { |
114 | return ret; |
115 | } |
116 | |
117 | queue_prod_incr(q); |
118 | return MEMTX_OK; |
119 | } |
120 | |
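/*
 * smmuv3_write_eventq - Push one event record onto the event queue.
 * Returns MEMTX_ERROR if the queue is disabled or full; on success the
 * EVTQ interrupt is triggered as the queue is now non-empty.
 */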
121 | static MemTxResult smmuv3_write_eventq(SMMUv3State *s, Evt *evt) |
122 | { |
123 | SMMUQueue *q = &s->eventq; |
124 | MemTxResult r; |
125 | |
126 | if (!smmuv3_eventq_enabled(s)) { |
127 | return MEMTX_ERROR; |
128 | } |
129 | |
130 | if (smmuv3_q_full(q)) { |
131 | return MEMTX_ERROR; |
132 | } |
133 | |
134 | r = queue_write(q, evt); |
135 | if (r != MEMTX_OK) { |
136 | return r; |
137 | } |
138 | |
139 | if (!smmuv3_q_empty(q)) { |
140 | smmuv3_trigger_irq(s, SMMU_IRQ_EVTQ, 0); |
141 | } |
142 | return MEMTX_OK; |
143 | } |
144 | |
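/*
 * smmuv3_record_event - Build an event record from @info and push it
 * onto the event queue; if the queue write aborts, the EVENTQ_ABT_ERR
 * global error is raised instead.
 */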
145 | void smmuv3_record_event(SMMUv3State *s, SMMUEventInfo *info) |
146 | { |
147 | Evt evt = {}; |
148 | MemTxResult r; |
149 | |
150 | if (!smmuv3_eventq_enabled(s)) { |
151 | return; |
152 | } |
153 | |
154 | EVT_SET_TYPE(&evt, info->type); |
155 | EVT_SET_SID(&evt, info->sid); |
156 | |
157 | switch (info->type) { |
158 | case SMMU_EVT_NONE: |
159 | return; |
160 | case SMMU_EVT_F_UUT: |
161 | EVT_SET_SSID(&evt, info->u.f_uut.ssid); |
162 | EVT_SET_SSV(&evt, info->u.f_uut.ssv); |
163 | EVT_SET_ADDR(&evt, info->u.f_uut.addr); |
164 | EVT_SET_RNW(&evt, info->u.f_uut.rnw); |
165 | EVT_SET_PNU(&evt, info->u.f_uut.pnu); |
166 | EVT_SET_IND(&evt, info->u.f_uut.ind); |
167 | break; |
168 | case SMMU_EVT_C_BAD_STREAMID: |
169 | EVT_SET_SSID(&evt, info->u.c_bad_streamid.ssid); |
170 | EVT_SET_SSV(&evt, info->u.c_bad_streamid.ssv); |
171 | break; |
172 | case SMMU_EVT_F_STE_FETCH: |
173 | EVT_SET_SSID(&evt, info->u.f_ste_fetch.ssid); |
174 | EVT_SET_SSV(&evt, info->u.f_ste_fetch.ssv); |
175 | EVT_SET_ADDR(&evt, info->u.f_ste_fetch.addr); |
176 | break; |
177 | case SMMU_EVT_C_BAD_STE: |
178 | EVT_SET_SSID(&evt, info->u.c_bad_ste.ssid); |
179 | EVT_SET_SSV(&evt, info->u.c_bad_ste.ssv); |
180 | break; |
181 | case SMMU_EVT_F_STREAM_DISABLED: |
182 | break; |
183 | case SMMU_EVT_F_TRANS_FORBIDDEN: |
184 | EVT_SET_ADDR(&evt, info->u.f_transl_forbidden.addr); |
185 | EVT_SET_RNW(&evt, info->u.f_transl_forbidden.rnw); |
186 | break; |
187 | case SMMU_EVT_C_BAD_SUBSTREAMID: |
188 | EVT_SET_SSID(&evt, info->u.c_bad_substream.ssid); |
189 | break; |
190 | case SMMU_EVT_F_CD_FETCH: |
191 | EVT_SET_SSID(&evt, info->u.f_cd_fetch.ssid); |
192 | EVT_SET_SSV(&evt, info->u.f_cd_fetch.ssv); |
193 | EVT_SET_ADDR(&evt, info->u.f_cd_fetch.addr); |
194 | break; |
195 | case SMMU_EVT_C_BAD_CD: |
196 | EVT_SET_SSID(&evt, info->u.c_bad_cd.ssid); |
197 | EVT_SET_SSV(&evt, info->u.c_bad_cd.ssv); |
198 | break; |
199 | case SMMU_EVT_F_WALK_EABT: |
200 | case SMMU_EVT_F_TRANSLATION: |
201 | case SMMU_EVT_F_ADDR_SIZE: |
202 | case SMMU_EVT_F_ACCESS: |
203 | case SMMU_EVT_F_PERMISSION: |
204 | EVT_SET_STALL(&evt, info->u.f_walk_eabt.stall); |
205 | EVT_SET_STAG(&evt, info->u.f_walk_eabt.stag); |
206 | EVT_SET_SSID(&evt, info->u.f_walk_eabt.ssid); |
207 | EVT_SET_SSV(&evt, info->u.f_walk_eabt.ssv); |
208 | EVT_SET_S2(&evt, info->u.f_walk_eabt.s2); |
209 | EVT_SET_ADDR(&evt, info->u.f_walk_eabt.addr); |
210 | EVT_SET_RNW(&evt, info->u.f_walk_eabt.rnw); |
211 | EVT_SET_PNU(&evt, info->u.f_walk_eabt.pnu); |
212 | EVT_SET_IND(&evt, info->u.f_walk_eabt.ind); |
213 | EVT_SET_CLASS(&evt, info->u.f_walk_eabt.class); |
214 | EVT_SET_ADDR2(&evt, info->u.f_walk_eabt.addr2); |
215 | break; |
216 | case SMMU_EVT_F_CFG_CONFLICT: |
217 | EVT_SET_SSID(&evt, info->u.f_cfg_conflict.ssid); |
218 | EVT_SET_SSV(&evt, info->u.f_cfg_conflict.ssv); |
219 | break; |
    /* the remaining event types are not implemented */
221 | case SMMU_EVT_F_BAD_ATS_TREQ: |
222 | case SMMU_EVT_F_TLB_CONFLICT: |
223 | case SMMU_EVT_E_PAGE_REQ: |
224 | default: |
225 | g_assert_not_reached(); |
226 | } |
227 | |
228 | trace_smmuv3_record_event(smmu_event_string(info->type), info->sid); |
229 | r = smmuv3_write_eventq(s, &evt); |
230 | if (r != MEMTX_OK) { |
231 | smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_EVENTQ_ABT_ERR_MASK); |
232 | } |
233 | info->recorded = true; |
234 | } |
235 | |
236 | static void smmuv3_init_regs(SMMUv3State *s) |
237 | { |
238 | /** |
239 | * IDR0: stage1 only, AArch64 only, coherent access, 16b ASID, |
240 | * multi-level stream table |
241 | */ |
242 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, S1P, 1); /* stage 1 supported */ |
243 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTF, 2); /* AArch64 PTW only */ |
244 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, COHACC, 1); /* IO coherent */ |
245 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, ASID16, 1); /* 16-bit ASID */ |
246 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TTENDIAN, 2); /* little endian */ |
247 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STALL_MODEL, 1); /* No stall */ |
248 | /* terminated transaction will always be aborted/error returned */ |
249 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, TERM_MODEL, 1); |
250 | /* 2-level stream table supported */ |
251 | s->idr[0] = FIELD_DP32(s->idr[0], IDR0, STLEVEL, 1); |
252 | |
253 | s->idr[1] = FIELD_DP32(s->idr[1], IDR1, SIDSIZE, SMMU_IDR1_SIDSIZE); |
254 | s->idr[1] = FIELD_DP32(s->idr[1], IDR1, EVENTQS, SMMU_EVENTQS); |
255 | s->idr[1] = FIELD_DP32(s->idr[1], IDR1, CMDQS, SMMU_CMDQS); |
256 | |
257 | /* 4K and 64K granule support */ |
258 | s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN4K, 1); |
259 | s->idr[5] = FIELD_DP32(s->idr[5], IDR5, GRAN64K, 1); |
260 | s->idr[5] = FIELD_DP32(s->idr[5], IDR5, OAS, SMMU_IDR5_OAS); /* 44 bits */ |
261 | |
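    /* The LOG2SIZE field occupies bits [4:0] of the queue base registers */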
262 | s->cmdq.base = deposit64(s->cmdq.base, 0, 5, SMMU_CMDQS); |
263 | s->cmdq.prod = 0; |
264 | s->cmdq.cons = 0; |
265 | s->cmdq.entry_size = sizeof(struct Cmd); |
266 | s->eventq.base = deposit64(s->eventq.base, 0, 5, SMMU_EVENTQS); |
267 | s->eventq.prod = 0; |
268 | s->eventq.cons = 0; |
269 | s->eventq.entry_size = sizeof(struct Evt); |
270 | |
271 | s->features = 0; |
272 | s->sid_split = 0; |
273 | } |
274 | |
275 | static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf, |
276 | SMMUEventInfo *event) |
277 | { |
278 | int ret; |
279 | |
280 | trace_smmuv3_get_ste(addr); |
281 | /* TODO: guarantee 64-bit single-copy atomicity */ |
282 | ret = dma_memory_read(&address_space_memory, addr, |
283 | (void *)buf, sizeof(*buf)); |
284 | if (ret != MEMTX_OK) { |
285 | qemu_log_mask(LOG_GUEST_ERROR, |
286 | "Cannot fetch pte at address=0x%" PRIx64"\n" , addr); |
287 | event->type = SMMU_EVT_F_STE_FETCH; |
288 | event->u.f_ste_fetch.addr = addr; |
289 | return -EINVAL; |
290 | } |
    return 0;
}
294 | |
295 | /* @ssid > 0 not supported yet */ |
296 | static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, |
297 | CD *buf, SMMUEventInfo *event) |
298 | { |
299 | dma_addr_t addr = STE_CTXPTR(ste); |
300 | int ret; |
301 | |
302 | trace_smmuv3_get_cd(addr); |
303 | /* TODO: guarantee 64-bit single-copy atomicity */ |
304 | ret = dma_memory_read(&address_space_memory, addr, |
305 | (void *)buf, sizeof(*buf)); |
306 | if (ret != MEMTX_OK) { |
307 | qemu_log_mask(LOG_GUEST_ERROR, |
308 | "Cannot fetch pte at address=0x%" PRIx64"\n" , addr); |
309 | event->type = SMMU_EVT_F_CD_FETCH; |
        event->u.f_cd_fetch.addr = addr;
311 | return -EINVAL; |
312 | } |
313 | return 0; |
314 | } |
315 | |
316 | /* Returns < 0 in case of invalid STE, 0 otherwise */ |
317 | static int decode_ste(SMMUv3State *s, SMMUTransCfg *cfg, |
318 | STE *ste, SMMUEventInfo *event) |
319 | { |
320 | uint32_t config; |
321 | |
322 | if (!STE_VALID(ste)) { |
323 | if (!event->inval_ste_allowed) { |
324 | qemu_log_mask(LOG_GUEST_ERROR, "invalid STE\n" ); |
325 | } |
326 | goto bad_ste; |
327 | } |
328 | |
329 | config = STE_CONFIG(ste); |
330 | |
331 | if (STE_CFG_ABORT(config)) { |
332 | cfg->aborted = true; |
333 | return 0; |
334 | } |
335 | |
336 | if (STE_CFG_BYPASS(config)) { |
337 | cfg->bypassed = true; |
338 | return 0; |
339 | } |
340 | |
341 | if (STE_CFG_S2_ENABLED(config)) { |
342 | qemu_log_mask(LOG_UNIMP, "SMMUv3 does not support stage 2 yet\n" ); |
343 | goto bad_ste; |
344 | } |
345 | |
346 | if (STE_S1CDMAX(ste) != 0) { |
347 | qemu_log_mask(LOG_UNIMP, |
348 | "SMMUv3 does not support multiple context descriptors yet\n" ); |
349 | goto bad_ste; |
350 | } |
351 | |
352 | if (STE_S1STALLD(ste)) { |
353 | qemu_log_mask(LOG_UNIMP, |
354 | "SMMUv3 S1 stalling fault model not allowed yet\n" ); |
355 | goto bad_ste; |
356 | } |
357 | return 0; |
358 | |
359 | bad_ste: |
360 | event->type = SMMU_EVT_C_BAD_STE; |
361 | return -EINVAL; |
362 | } |
363 | |
364 | /** |
 * smmu_find_ste - Return the stream table entry associated
 * with the given SID
367 | * |
368 | * @s: smmuv3 handle |
369 | * @sid: stream ID |
370 | * @ste: returned stream table entry |
371 | * @event: handle to an event info |
372 | * |
 * Supports linear and 2-level stream tables
374 | * Return 0 on success, -EINVAL otherwise |
375 | */ |
376 | static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, |
377 | SMMUEventInfo *event) |
378 | { |
379 | dma_addr_t addr; |
380 | int ret; |
381 | |
382 | trace_smmuv3_find_ste(sid, s->features, s->sid_split); |
383 | /* Check SID range */ |
    if (sid >= (1 << SMMU_IDR1_SIDSIZE)) {
385 | event->type = SMMU_EVT_C_BAD_STREAMID; |
386 | return -EINVAL; |
387 | } |
388 | if (s->features & SMMU_FEATURE_2LVL_STE) { |
389 | int l1_ste_offset, l2_ste_offset, max_l2_ste, span; |
390 | dma_addr_t strtab_base, l1ptr, l2ptr; |
391 | STEDesc l1std; |
392 | |
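        /*
         * In a 2-level table, the upper SID bits index an L1 descriptor
         * that points to an L2 table of STEs, and the low @sid_split
         * bits index the STE within that L2 table. The L1 descriptor
         * SPAN field encodes how many STEs the L2 table holds.
         */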
393 | strtab_base = s->strtab_base & SMMU_BASE_ADDR_MASK; |
394 | l1_ste_offset = sid >> s->sid_split; |
395 | l2_ste_offset = sid & ((1 << s->sid_split) - 1); |
396 | l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std)); |
397 | /* TODO: guarantee 64-bit single-copy atomicity */ |
398 | ret = dma_memory_read(&address_space_memory, l1ptr, |
399 | (uint8_t *)&l1std, sizeof(l1std)); |
400 | if (ret != MEMTX_OK) { |
401 | qemu_log_mask(LOG_GUEST_ERROR, |
402 | "Could not read L1PTR at 0X%" PRIx64"\n" , l1ptr); |
403 | event->type = SMMU_EVT_F_STE_FETCH; |
404 | event->u.f_ste_fetch.addr = l1ptr; |
405 | return -EINVAL; |
406 | } |
407 | |
408 | span = L1STD_SPAN(&l1std); |
409 | |
410 | if (!span) { |
411 | /* l2ptr is not valid */ |
412 | if (!event->inval_ste_allowed) { |
413 | qemu_log_mask(LOG_GUEST_ERROR, |
414 | "invalid sid=%d (L1STD span=0)\n" , sid); |
415 | } |
416 | event->type = SMMU_EVT_C_BAD_STREAMID; |
417 | return -EINVAL; |
418 | } |
419 | max_l2_ste = (1 << span) - 1; |
420 | l2ptr = l1std_l2ptr(&l1std); |
421 | trace_smmuv3_find_ste_2lvl(s->strtab_base, l1ptr, l1_ste_offset, |
422 | l2ptr, l2_ste_offset, max_l2_ste); |
423 | if (l2_ste_offset > max_l2_ste) { |
424 | qemu_log_mask(LOG_GUEST_ERROR, |
425 | "l2_ste_offset=%d > max_l2_ste=%d\n" , |
426 | l2_ste_offset, max_l2_ste); |
427 | event->type = SMMU_EVT_C_BAD_STE; |
428 | return -EINVAL; |
429 | } |
430 | addr = l2ptr + l2_ste_offset * sizeof(*ste); |
431 | } else { |
432 | addr = s->strtab_base + sid * sizeof(*ste); |
433 | } |
434 | |
435 | if (smmu_get_ste(s, addr, ste, event)) { |
436 | return -EINVAL; |
437 | } |
438 | |
439 | return 0; |
440 | } |
441 | |
442 | static int decode_cd(SMMUTransCfg *cfg, CD *cd, SMMUEventInfo *event) |
443 | { |
444 | int ret = -EINVAL; |
445 | int i; |
446 | |
447 | if (!CD_VALID(cd) || !CD_AARCH64(cd)) { |
448 | goto bad_cd; |
449 | } |
450 | if (!CD_A(cd)) { |
451 | goto bad_cd; /* SMMU_IDR0.TERM_MODEL == 1 */ |
452 | } |
453 | if (CD_S(cd)) { |
454 | goto bad_cd; /* !STE_SECURE && SMMU_IDR0.STALL_MODEL == 1 */ |
455 | } |
456 | if (CD_HA(cd) || CD_HD(cd)) { |
457 | goto bad_cd; /* HTTU = 0 */ |
458 | } |
459 | |
    /* we only support AArch64 stage 1 at the moment */
461 | cfg->aa64 = true; |
462 | cfg->stage = 1; |
463 | |
464 | cfg->oas = oas2bits(CD_IPS(cd)); |
465 | cfg->oas = MIN(oas2bits(SMMU_IDR5_OAS), cfg->oas); |
466 | cfg->tbi = CD_TBI(cd); |
467 | cfg->asid = CD_ASID(cd); |
468 | |
469 | trace_smmuv3_decode_cd(cfg->oas); |
470 | |
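    /*
     * TT0 covers the lower VA range and TT1 the upper range; which one
     * applies to a given address is later chosen by select_tt().
     */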
471 | /* decode data dependent on TT */ |
472 | for (i = 0; i <= 1; i++) { |
473 | int tg, tsz; |
474 | SMMUTransTableInfo *tt = &cfg->tt[i]; |
475 | |
476 | cfg->tt[i].disabled = CD_EPD(cd, i); |
477 | if (cfg->tt[i].disabled) { |
478 | continue; |
479 | } |
480 | |
481 | tsz = CD_TSZ(cd, i); |
482 | if (tsz < 16 || tsz > 39) { |
483 | goto bad_cd; |
484 | } |
485 | |
486 | tg = CD_TG(cd, i); |
487 | tt->granule_sz = tg2granule(tg, i); |
488 | if ((tt->granule_sz != 12 && tt->granule_sz != 16) || CD_ENDI(cd)) { |
489 | goto bad_cd; |
490 | } |
491 | |
492 | tt->tsz = tsz; |
493 | tt->ttb = CD_TTB(cd, i); |
494 | if (tt->ttb & ~(MAKE_64BIT_MASK(0, cfg->oas))) { |
495 | goto bad_cd; |
496 | } |
497 | trace_smmuv3_decode_cd_tt(i, tt->tsz, tt->ttb, tt->granule_sz); |
498 | } |
499 | |
500 | event->record_trans_faults = CD_R(cd); |
501 | |
502 | return 0; |
503 | |
504 | bad_cd: |
505 | event->type = SMMU_EVT_C_BAD_CD; |
506 | return ret; |
507 | } |
508 | |
509 | /** |
510 | * smmuv3_decode_config - Prepare the translation configuration |
511 | * for the @mr iommu region |
512 | * @mr: iommu memory region the translation config must be prepared for |
513 | * @cfg: output translation configuration which is populated through |
514 | * the different configuration decoding steps |
515 | * @event: must be zero'ed by the caller |
516 | * |
 * Return < 0 in case of config decoding error (@event is filled
 * accordingly), 0 otherwise.
519 | */ |
520 | static int smmuv3_decode_config(IOMMUMemoryRegion *mr, SMMUTransCfg *cfg, |
521 | SMMUEventInfo *event) |
522 | { |
523 | SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); |
524 | uint32_t sid = smmu_get_sid(sdev); |
525 | SMMUv3State *s = sdev->smmu; |
526 | int ret; |
527 | STE ste; |
528 | CD cd; |
529 | |
530 | ret = smmu_find_ste(s, sid, &ste, event); |
531 | if (ret) { |
532 | return ret; |
533 | } |
534 | |
535 | ret = decode_ste(s, cfg, &ste, event); |
536 | if (ret) { |
537 | return ret; |
538 | } |
539 | |
540 | if (cfg->aborted || cfg->bypassed) { |
541 | return 0; |
542 | } |
543 | |
544 | ret = smmu_get_cd(s, &ste, 0 /* ssid */, &cd, event); |
545 | if (ret) { |
546 | return ret; |
547 | } |
548 | |
549 | return decode_cd(cfg, &cd, event); |
550 | } |
551 | |
552 | /** |
 * smmuv3_get_config - Look up a cached copy of the configuration data
 * for @sdev; on a cache miss, decode the configuration structures from
 * guest RAM.
 *
 * @sdev: SMMUDevice handle
 * @event: output event info
 *
 * The configuration cache contains data resulting from both STE and CD
 * decoding in the form of an SMMUTransCfg struct. The hash table is
 * indexed by the SMMUDevice handle.
563 | */ |
564 | static SMMUTransCfg *smmuv3_get_config(SMMUDevice *sdev, SMMUEventInfo *event) |
565 | { |
566 | SMMUv3State *s = sdev->smmu; |
567 | SMMUState *bc = &s->smmu_state; |
568 | SMMUTransCfg *cfg; |
569 | |
570 | cfg = g_hash_table_lookup(bc->configs, sdev); |
571 | if (cfg) { |
572 | sdev->cfg_cache_hits++; |
573 | trace_smmuv3_config_cache_hit(smmu_get_sid(sdev), |
574 | sdev->cfg_cache_hits, sdev->cfg_cache_misses, |
575 | 100 * sdev->cfg_cache_hits / |
576 | (sdev->cfg_cache_hits + sdev->cfg_cache_misses)); |
577 | } else { |
578 | sdev->cfg_cache_misses++; |
579 | trace_smmuv3_config_cache_miss(smmu_get_sid(sdev), |
580 | sdev->cfg_cache_hits, sdev->cfg_cache_misses, |
581 | 100 * sdev->cfg_cache_hits / |
582 | (sdev->cfg_cache_hits + sdev->cfg_cache_misses)); |
583 | cfg = g_new0(SMMUTransCfg, 1); |
584 | |
585 | if (!smmuv3_decode_config(&sdev->iommu, cfg, event)) { |
586 | g_hash_table_insert(bc->configs, sdev, cfg); |
587 | } else { |
588 | g_free(cfg); |
589 | cfg = NULL; |
590 | } |
591 | } |
592 | return cfg; |
593 | } |
594 | |
595 | static void smmuv3_flush_config(SMMUDevice *sdev) |
596 | { |
597 | SMMUv3State *s = sdev->smmu; |
598 | SMMUState *bc = &s->smmu_state; |
599 | |
600 | trace_smmuv3_config_cache_inv(smmu_get_sid(sdev)); |
601 | g_hash_table_remove(bc->configs, sdev); |
602 | } |
603 | |
604 | static IOMMUTLBEntry smmuv3_translate(IOMMUMemoryRegion *mr, hwaddr addr, |
605 | IOMMUAccessFlags flag, int iommu_idx) |
606 | { |
607 | SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); |
608 | SMMUv3State *s = sdev->smmu; |
609 | uint32_t sid = smmu_get_sid(sdev); |
610 | SMMUEventInfo event = {.type = SMMU_EVT_NONE, |
611 | .sid = sid, |
612 | .inval_ste_allowed = false}; |
613 | SMMUPTWEventInfo ptw_info = {}; |
614 | SMMUTranslationStatus status; |
615 | SMMUState *bs = ARM_SMMU(s); |
616 | uint64_t page_mask, aligned_addr; |
617 | IOMMUTLBEntry *cached_entry = NULL; |
618 | SMMUTransTableInfo *tt; |
619 | SMMUTransCfg *cfg = NULL; |
620 | IOMMUTLBEntry entry = { |
621 | .target_as = &address_space_memory, |
622 | .iova = addr, |
623 | .translated_addr = addr, |
624 | .addr_mask = ~(hwaddr)0, |
625 | .perm = IOMMU_NONE, |
626 | }; |
627 | SMMUIOTLBKey key, *new_key; |
628 | |
629 | qemu_mutex_lock(&s->mutex); |
630 | |
631 | if (!smmu_enabled(s)) { |
632 | status = SMMU_TRANS_DISABLE; |
633 | goto epilogue; |
634 | } |
635 | |
636 | cfg = smmuv3_get_config(sdev, &event); |
637 | if (!cfg) { |
638 | status = SMMU_TRANS_ERROR; |
639 | goto epilogue; |
640 | } |
641 | |
642 | if (cfg->aborted) { |
643 | status = SMMU_TRANS_ABORT; |
644 | goto epilogue; |
645 | } |
646 | |
647 | if (cfg->bypassed) { |
648 | status = SMMU_TRANS_BYPASS; |
649 | goto epilogue; |
650 | } |
651 | |
652 | tt = select_tt(cfg, addr); |
653 | if (!tt) { |
654 | if (event.record_trans_faults) { |
655 | event.type = SMMU_EVT_F_TRANSLATION; |
656 | event.u.f_translation.addr = addr; |
657 | event.u.f_translation.rnw = flag & 0x1; |
658 | } |
659 | status = SMMU_TRANS_ERROR; |
660 | goto epilogue; |
661 | } |
662 | |
663 | page_mask = (1ULL << (tt->granule_sz)) - 1; |
664 | aligned_addr = addr & ~page_mask; |
665 | |
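    /* the IOTLB is keyed by (asid, granule-aligned iova) */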
666 | key.asid = cfg->asid; |
667 | key.iova = aligned_addr; |
668 | |
669 | cached_entry = g_hash_table_lookup(bs->iotlb, &key); |
670 | if (cached_entry) { |
671 | cfg->iotlb_hits++; |
672 | trace_smmu_iotlb_cache_hit(cfg->asid, aligned_addr, |
673 | cfg->iotlb_hits, cfg->iotlb_misses, |
674 | 100 * cfg->iotlb_hits / |
675 | (cfg->iotlb_hits + cfg->iotlb_misses)); |
676 | if ((flag & IOMMU_WO) && !(cached_entry->perm & IOMMU_WO)) { |
677 | status = SMMU_TRANS_ERROR; |
678 | if (event.record_trans_faults) { |
679 | event.type = SMMU_EVT_F_PERMISSION; |
680 | event.u.f_permission.addr = addr; |
681 | event.u.f_permission.rnw = flag & 0x1; |
682 | } |
683 | } else { |
684 | status = SMMU_TRANS_SUCCESS; |
685 | } |
686 | goto epilogue; |
687 | } |
688 | |
689 | cfg->iotlb_misses++; |
690 | trace_smmu_iotlb_cache_miss(cfg->asid, addr & ~page_mask, |
691 | cfg->iotlb_hits, cfg->iotlb_misses, |
692 | 100 * cfg->iotlb_hits / |
693 | (cfg->iotlb_hits + cfg->iotlb_misses)); |
694 | |
695 | if (g_hash_table_size(bs->iotlb) >= SMMU_IOTLB_MAX_SIZE) { |
696 | smmu_iotlb_inv_all(bs); |
697 | } |
698 | |
699 | cached_entry = g_new0(IOMMUTLBEntry, 1); |
700 | |
701 | if (smmu_ptw(cfg, aligned_addr, flag, cached_entry, &ptw_info)) { |
702 | g_free(cached_entry); |
703 | switch (ptw_info.type) { |
704 | case SMMU_PTW_ERR_WALK_EABT: |
705 | event.type = SMMU_EVT_F_WALK_EABT; |
706 | event.u.f_walk_eabt.addr = addr; |
707 | event.u.f_walk_eabt.rnw = flag & 0x1; |
708 | event.u.f_walk_eabt.class = 0x1; |
709 | event.u.f_walk_eabt.addr2 = ptw_info.addr; |
710 | break; |
711 | case SMMU_PTW_ERR_TRANSLATION: |
712 | if (event.record_trans_faults) { |
713 | event.type = SMMU_EVT_F_TRANSLATION; |
714 | event.u.f_translation.addr = addr; |
715 | event.u.f_translation.rnw = flag & 0x1; |
716 | } |
717 | break; |
718 | case SMMU_PTW_ERR_ADDR_SIZE: |
719 | if (event.record_trans_faults) { |
720 | event.type = SMMU_EVT_F_ADDR_SIZE; |
721 | event.u.f_addr_size.addr = addr; |
722 | event.u.f_addr_size.rnw = flag & 0x1; |
723 | } |
724 | break; |
725 | case SMMU_PTW_ERR_ACCESS: |
726 | if (event.record_trans_faults) { |
727 | event.type = SMMU_EVT_F_ACCESS; |
728 | event.u.f_access.addr = addr; |
729 | event.u.f_access.rnw = flag & 0x1; |
730 | } |
731 | break; |
732 | case SMMU_PTW_ERR_PERMISSION: |
733 | if (event.record_trans_faults) { |
734 | event.type = SMMU_EVT_F_PERMISSION; |
735 | event.u.f_permission.addr = addr; |
736 | event.u.f_permission.rnw = flag & 0x1; |
737 | } |
738 | break; |
739 | default: |
740 | g_assert_not_reached(); |
741 | } |
742 | status = SMMU_TRANS_ERROR; |
743 | } else { |
744 | new_key = g_new0(SMMUIOTLBKey, 1); |
745 | new_key->asid = cfg->asid; |
746 | new_key->iova = aligned_addr; |
747 | g_hash_table_insert(bs->iotlb, new_key, cached_entry); |
748 | status = SMMU_TRANS_SUCCESS; |
749 | } |
750 | |
751 | epilogue: |
752 | qemu_mutex_unlock(&s->mutex); |
753 | switch (status) { |
754 | case SMMU_TRANS_SUCCESS: |
755 | entry.perm = flag; |
756 | entry.translated_addr = cached_entry->translated_addr + |
757 | (addr & page_mask); |
758 | entry.addr_mask = cached_entry->addr_mask; |
759 | trace_smmuv3_translate_success(mr->parent_obj.name, sid, addr, |
760 | entry.translated_addr, entry.perm); |
761 | break; |
762 | case SMMU_TRANS_DISABLE: |
763 | entry.perm = flag; |
764 | entry.addr_mask = ~TARGET_PAGE_MASK; |
765 | trace_smmuv3_translate_disable(mr->parent_obj.name, sid, addr, |
766 | entry.perm); |
767 | break; |
768 | case SMMU_TRANS_BYPASS: |
769 | entry.perm = flag; |
770 | entry.addr_mask = ~TARGET_PAGE_MASK; |
771 | trace_smmuv3_translate_bypass(mr->parent_obj.name, sid, addr, |
772 | entry.perm); |
773 | break; |
774 | case SMMU_TRANS_ABORT: |
775 | /* no event is recorded on abort */ |
776 | trace_smmuv3_translate_abort(mr->parent_obj.name, sid, addr, |
777 | entry.perm); |
778 | break; |
779 | case SMMU_TRANS_ERROR: |
780 | qemu_log_mask(LOG_GUEST_ERROR, |
781 | "%s translation failed for iova=0x%" PRIx64"(%s)\n" , |
782 | mr->parent_obj.name, addr, smmu_event_string(event.type)); |
783 | smmuv3_record_event(s, &event); |
784 | break; |
785 | } |
786 | |
787 | return entry; |
788 | } |
789 | |
790 | /** |
791 | * smmuv3_notify_iova - call the notifier @n for a given |
792 | * @asid and @iova tuple. |
793 | * |
 * @mr: IOMMU memory region handle
795 | * @n: notifier to be called |
796 | * @asid: address space ID or negative value if we don't care |
797 | * @iova: iova |
798 | */ |
799 | static void smmuv3_notify_iova(IOMMUMemoryRegion *mr, |
800 | IOMMUNotifier *n, |
801 | int asid, |
802 | dma_addr_t iova) |
803 | { |
804 | SMMUDevice *sdev = container_of(mr, SMMUDevice, iommu); |
805 | SMMUEventInfo event = {.inval_ste_allowed = true}; |
806 | SMMUTransTableInfo *tt; |
807 | SMMUTransCfg *cfg; |
808 | IOMMUTLBEntry entry; |
809 | |
810 | cfg = smmuv3_get_config(sdev, &event); |
811 | if (!cfg) { |
812 | return; |
813 | } |
814 | |
815 | if (asid >= 0 && cfg->asid != asid) { |
816 | return; |
817 | } |
818 | |
819 | tt = select_tt(cfg, iova); |
820 | if (!tt) { |
821 | return; |
822 | } |
823 | |
824 | entry.target_as = &address_space_memory; |
825 | entry.iova = iova; |
826 | entry.addr_mask = (1 << tt->granule_sz) - 1; |
827 | entry.perm = IOMMU_NONE; |
828 | |
829 | memory_region_notify_one(n, &entry); |
830 | } |
831 | |
/* invalidate an asid/iova tuple in all memory regions */
833 | static void smmuv3_inv_notifiers_iova(SMMUState *s, int asid, dma_addr_t iova) |
834 | { |
835 | SMMUDevice *sdev; |
836 | |
837 | QLIST_FOREACH(sdev, &s->devices_with_notifiers, next) { |
838 | IOMMUMemoryRegion *mr = &sdev->iommu; |
839 | IOMMUNotifier *n; |
840 | |
841 | trace_smmuv3_inv_notifiers_iova(mr->parent_obj.name, asid, iova); |
842 | |
843 | IOMMU_NOTIFIER_FOREACH(n, mr) { |
844 | smmuv3_notify_iova(mr, n, asid, iova); |
845 | } |
846 | } |
847 | } |
848 | |
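/*
 * smmuv3_cmdq_consume - Dequeue and execute commands until the command
 * queue is empty or an error occurs. On error the error code is stored
 * via smmu_write_cmdq_err() and the CMDQ_ERR global error is raised.
 */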
849 | static int smmuv3_cmdq_consume(SMMUv3State *s) |
850 | { |
851 | SMMUState *bs = ARM_SMMU(s); |
852 | SMMUCmdError cmd_error = SMMU_CERROR_NONE; |
853 | SMMUQueue *q = &s->cmdq; |
854 | SMMUCommandType type = 0; |
855 | |
856 | if (!smmuv3_cmdq_enabled(s)) { |
857 | return 0; |
858 | } |
859 | /* |
860 | * some commands depend on register values, typically CR0. In case those |
861 | * register values change while handling the command, spec says it |
862 | * is UNPREDICTABLE whether the command is interpreted under the new |
863 | * or old value. |
864 | */ |
865 | |
866 | while (!smmuv3_q_empty(q)) { |
867 | uint32_t pending = s->gerror ^ s->gerrorn; |
868 | Cmd cmd; |
869 | |
870 | trace_smmuv3_cmdq_consume(Q_PROD(q), Q_CONS(q), |
871 | Q_PROD_WRAP(q), Q_CONS_WRAP(q)); |
872 | |
873 | if (FIELD_EX32(pending, GERROR, CMDQ_ERR)) { |
874 | break; |
875 | } |
876 | |
877 | if (queue_read(q, &cmd) != MEMTX_OK) { |
878 | cmd_error = SMMU_CERROR_ABT; |
879 | break; |
880 | } |
881 | |
882 | type = CMD_TYPE(&cmd); |
883 | |
884 | trace_smmuv3_cmdq_opcode(smmu_cmd_string(type)); |
885 | |
886 | qemu_mutex_lock(&s->mutex); |
887 | switch (type) { |
888 | case SMMU_CMD_SYNC: |
889 | if (CMD_SYNC_CS(&cmd) & CMD_SYNC_SIG_IRQ) { |
890 | smmuv3_trigger_irq(s, SMMU_IRQ_CMD_SYNC, 0); |
891 | } |
892 | break; |
893 | case SMMU_CMD_PREFETCH_CONFIG: |
894 | case SMMU_CMD_PREFETCH_ADDR: |
895 | break; |
896 | case SMMU_CMD_CFGI_STE: |
897 | { |
898 | uint32_t sid = CMD_SID(&cmd); |
899 | IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid); |
900 | SMMUDevice *sdev; |
901 | |
902 | if (CMD_SSEC(&cmd)) { |
903 | cmd_error = SMMU_CERROR_ILL; |
904 | break; |
905 | } |
906 | |
907 | if (!mr) { |
908 | break; |
909 | } |
910 | |
911 | trace_smmuv3_cmdq_cfgi_ste(sid); |
912 | sdev = container_of(mr, SMMUDevice, iommu); |
913 | smmuv3_flush_config(sdev); |
914 | |
915 | break; |
916 | } |
917 | case SMMU_CMD_CFGI_STE_RANGE: /* same as SMMU_CMD_CFGI_ALL */ |
918 | { |
919 | uint32_t start = CMD_SID(&cmd), end, i; |
920 | uint8_t range = CMD_STE_RANGE(&cmd); |
921 | |
922 | if (CMD_SSEC(&cmd)) { |
923 | cmd_error = SMMU_CERROR_ILL; |
924 | break; |
925 | } |
926 | |
927 | end = start + (1 << (range + 1)) - 1; |
928 | trace_smmuv3_cmdq_cfgi_ste_range(start, end); |
929 | |
930 | for (i = start; i <= end; i++) { |
931 | IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, i); |
932 | SMMUDevice *sdev; |
933 | |
934 | if (!mr) { |
935 | continue; |
936 | } |
937 | sdev = container_of(mr, SMMUDevice, iommu); |
938 | smmuv3_flush_config(sdev); |
939 | } |
940 | break; |
941 | } |
942 | case SMMU_CMD_CFGI_CD: |
943 | case SMMU_CMD_CFGI_CD_ALL: |
944 | { |
945 | uint32_t sid = CMD_SID(&cmd); |
946 | IOMMUMemoryRegion *mr = smmu_iommu_mr(bs, sid); |
947 | SMMUDevice *sdev; |
948 | |
949 | if (CMD_SSEC(&cmd)) { |
950 | cmd_error = SMMU_CERROR_ILL; |
951 | break; |
952 | } |
953 | |
954 | if (!mr) { |
955 | break; |
956 | } |
957 | |
958 | trace_smmuv3_cmdq_cfgi_cd(sid); |
959 | sdev = container_of(mr, SMMUDevice, iommu); |
960 | smmuv3_flush_config(sdev); |
961 | break; |
962 | } |
963 | case SMMU_CMD_TLBI_NH_ASID: |
964 | { |
965 | uint16_t asid = CMD_ASID(&cmd); |
966 | |
967 | trace_smmuv3_cmdq_tlbi_nh_asid(asid); |
968 | smmu_inv_notifiers_all(&s->smmu_state); |
969 | smmu_iotlb_inv_asid(bs, asid); |
970 | break; |
971 | } |
972 | case SMMU_CMD_TLBI_NH_ALL: |
973 | case SMMU_CMD_TLBI_NSNH_ALL: |
974 | trace_smmuv3_cmdq_tlbi_nh(); |
975 | smmu_inv_notifiers_all(&s->smmu_state); |
976 | smmu_iotlb_inv_all(bs); |
977 | break; |
978 | case SMMU_CMD_TLBI_NH_VAA: |
979 | { |
980 | dma_addr_t addr = CMD_ADDR(&cmd); |
981 | uint16_t vmid = CMD_VMID(&cmd); |
982 | |
983 | trace_smmuv3_cmdq_tlbi_nh_vaa(vmid, addr); |
984 | smmuv3_inv_notifiers_iova(bs, -1, addr); |
985 | smmu_iotlb_inv_all(bs); |
986 | break; |
987 | } |
988 | case SMMU_CMD_TLBI_NH_VA: |
989 | { |
990 | uint16_t asid = CMD_ASID(&cmd); |
991 | uint16_t vmid = CMD_VMID(&cmd); |
992 | dma_addr_t addr = CMD_ADDR(&cmd); |
993 | bool leaf = CMD_LEAF(&cmd); |
994 | |
995 | trace_smmuv3_cmdq_tlbi_nh_va(vmid, asid, addr, leaf); |
996 | smmuv3_inv_notifiers_iova(bs, asid, addr); |
997 | smmu_iotlb_inv_iova(bs, asid, addr); |
998 | break; |
999 | } |
1000 | case SMMU_CMD_TLBI_EL3_ALL: |
1001 | case SMMU_CMD_TLBI_EL3_VA: |
1002 | case SMMU_CMD_TLBI_EL2_ALL: |
1003 | case SMMU_CMD_TLBI_EL2_ASID: |
1004 | case SMMU_CMD_TLBI_EL2_VA: |
1005 | case SMMU_CMD_TLBI_EL2_VAA: |
1006 | case SMMU_CMD_TLBI_S12_VMALL: |
1007 | case SMMU_CMD_TLBI_S2_IPA: |
1008 | case SMMU_CMD_ATC_INV: |
1009 | case SMMU_CMD_PRI_RESP: |
1010 | case SMMU_CMD_RESUME: |
1011 | case SMMU_CMD_STALL_TERM: |
1012 | trace_smmuv3_unhandled_cmd(type); |
1013 | break; |
1014 | default: |
1015 | cmd_error = SMMU_CERROR_ILL; |
1016 | qemu_log_mask(LOG_GUEST_ERROR, |
1017 | "Illegal command type: %d\n" , CMD_TYPE(&cmd)); |
1018 | break; |
1019 | } |
1020 | qemu_mutex_unlock(&s->mutex); |
1021 | if (cmd_error) { |
1022 | break; |
1023 | } |
1024 | /* |
1025 | * We only increment the cons index after the completion of |
1026 | * the command. We do that because the SYNC returns immediately |
1027 | * and does not check the completion of previous commands |
1028 | */ |
1029 | queue_cons_incr(q); |
1030 | } |
1031 | |
1032 | if (cmd_error) { |
1033 | trace_smmuv3_cmdq_consume_error(smmu_cmd_string(type), cmd_error); |
1034 | smmu_write_cmdq_err(s, cmd_error); |
1035 | smmuv3_trigger_irq(s, SMMU_IRQ_GERROR, R_GERROR_CMDQ_ERR_MASK); |
1036 | } |
1037 | |
1038 | trace_smmuv3_cmdq_consume_out(Q_PROD(q), Q_CONS(q), |
1039 | Q_PROD_WRAP(q), Q_CONS_WRAP(q)); |
1040 | |
1041 | return 0; |
1042 | } |
1043 | |
1044 | static MemTxResult smmu_writell(SMMUv3State *s, hwaddr offset, |
1045 | uint64_t data, MemTxAttrs attrs) |
1046 | { |
1047 | switch (offset) { |
1048 | case A_GERROR_IRQ_CFG0: |
1049 | s->gerror_irq_cfg0 = data; |
1050 | return MEMTX_OK; |
1051 | case A_STRTAB_BASE: |
1052 | s->strtab_base = data; |
1053 | return MEMTX_OK; |
1054 | case A_CMDQ_BASE: |
1055 | s->cmdq.base = data; |
1056 | s->cmdq.log2size = extract64(s->cmdq.base, 0, 5); |
1057 | if (s->cmdq.log2size > SMMU_CMDQS) { |
1058 | s->cmdq.log2size = SMMU_CMDQS; |
1059 | } |
1060 | return MEMTX_OK; |
1061 | case A_EVENTQ_BASE: |
1062 | s->eventq.base = data; |
1063 | s->eventq.log2size = extract64(s->eventq.base, 0, 5); |
1064 | if (s->eventq.log2size > SMMU_EVENTQS) { |
1065 | s->eventq.log2size = SMMU_EVENTQS; |
1066 | } |
1067 | return MEMTX_OK; |
1068 | case A_EVENTQ_IRQ_CFG0: |
1069 | s->eventq_irq_cfg0 = data; |
1070 | return MEMTX_OK; |
1071 | default: |
1072 | qemu_log_mask(LOG_UNIMP, |
1073 | "%s Unexpected 64-bit access to 0x%" PRIx64" (WI)\n" , |
1074 | __func__, offset); |
1075 | return MEMTX_OK; |
1076 | } |
1077 | } |
1078 | |
1079 | static MemTxResult smmu_writel(SMMUv3State *s, hwaddr offset, |
1080 | uint64_t data, MemTxAttrs attrs) |
1081 | { |
1082 | switch (offset) { |
1083 | case A_CR0: |
1084 | s->cr[0] = data; |
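        /*
         * CR0ACK mirrors the write (minus reserved bits) so the guest
         * can see the update took effect
         */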
1085 | s->cr0ack = data & ~SMMU_CR0_RESERVED; |
1086 | /* in case the command queue has been enabled */ |
1087 | smmuv3_cmdq_consume(s); |
1088 | return MEMTX_OK; |
1089 | case A_CR1: |
1090 | s->cr[1] = data; |
1091 | return MEMTX_OK; |
1092 | case A_CR2: |
1093 | s->cr[2] = data; |
1094 | return MEMTX_OK; |
1095 | case A_IRQ_CTRL: |
1096 | s->irq_ctrl = data; |
1097 | return MEMTX_OK; |
1098 | case A_GERRORN: |
1099 | smmuv3_write_gerrorn(s, data); |
1100 | /* |
1101 | * By acknowledging the CMDQ_ERR, SW may notify cmds can |
1102 | * be processed again |
1103 | */ |
1104 | smmuv3_cmdq_consume(s); |
1105 | return MEMTX_OK; |
1106 | case A_GERROR_IRQ_CFG0: /* 64b */ |
1107 | s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 0, 32, data); |
1108 | return MEMTX_OK; |
1109 | case A_GERROR_IRQ_CFG0 + 4: |
1110 | s->gerror_irq_cfg0 = deposit64(s->gerror_irq_cfg0, 32, 32, data); |
1111 | return MEMTX_OK; |
1112 | case A_GERROR_IRQ_CFG1: |
1113 | s->gerror_irq_cfg1 = data; |
1114 | return MEMTX_OK; |
1115 | case A_GERROR_IRQ_CFG2: |
1116 | s->gerror_irq_cfg2 = data; |
1117 | return MEMTX_OK; |
1118 | case A_STRTAB_BASE: /* 64b */ |
1119 | s->strtab_base = deposit64(s->strtab_base, 0, 32, data); |
1120 | return MEMTX_OK; |
1121 | case A_STRTAB_BASE + 4: |
1122 | s->strtab_base = deposit64(s->strtab_base, 32, 32, data); |
1123 | return MEMTX_OK; |
1124 | case A_STRTAB_BASE_CFG: |
1125 | s->strtab_base_cfg = data; |
1126 | if (FIELD_EX32(data, STRTAB_BASE_CFG, FMT) == 1) { |
1127 | s->sid_split = FIELD_EX32(data, STRTAB_BASE_CFG, SPLIT); |
1128 | s->features |= SMMU_FEATURE_2LVL_STE; |
1129 | } |
1130 | return MEMTX_OK; |
1131 | case A_CMDQ_BASE: /* 64b */ |
1132 | s->cmdq.base = deposit64(s->cmdq.base, 0, 32, data); |
1133 | s->cmdq.log2size = extract64(s->cmdq.base, 0, 5); |
1134 | if (s->cmdq.log2size > SMMU_CMDQS) { |
1135 | s->cmdq.log2size = SMMU_CMDQS; |
1136 | } |
1137 | return MEMTX_OK; |
1138 | case A_CMDQ_BASE + 4: /* 64b */ |
1139 | s->cmdq.base = deposit64(s->cmdq.base, 32, 32, data); |
1140 | return MEMTX_OK; |
1141 | case A_CMDQ_PROD: |
1142 | s->cmdq.prod = data; |
1143 | smmuv3_cmdq_consume(s); |
1144 | return MEMTX_OK; |
1145 | case A_CMDQ_CONS: |
1146 | s->cmdq.cons = data; |
1147 | return MEMTX_OK; |
1148 | case A_EVENTQ_BASE: /* 64b */ |
1149 | s->eventq.base = deposit64(s->eventq.base, 0, 32, data); |
1150 | s->eventq.log2size = extract64(s->eventq.base, 0, 5); |
1151 | if (s->eventq.log2size > SMMU_EVENTQS) { |
1152 | s->eventq.log2size = SMMU_EVENTQS; |
1153 | } |
1154 | return MEMTX_OK; |
1155 | case A_EVENTQ_BASE + 4: |
1156 | s->eventq.base = deposit64(s->eventq.base, 32, 32, data); |
1157 | return MEMTX_OK; |
1158 | case A_EVENTQ_PROD: |
1159 | s->eventq.prod = data; |
1160 | return MEMTX_OK; |
1161 | case A_EVENTQ_CONS: |
1162 | s->eventq.cons = data; |
1163 | return MEMTX_OK; |
1164 | case A_EVENTQ_IRQ_CFG0: /* 64b */ |
1165 | s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 0, 32, data); |
1166 | return MEMTX_OK; |
1167 | case A_EVENTQ_IRQ_CFG0 + 4: |
1168 | s->eventq_irq_cfg0 = deposit64(s->eventq_irq_cfg0, 32, 32, data); |
1169 | return MEMTX_OK; |
1170 | case A_EVENTQ_IRQ_CFG1: |
1171 | s->eventq_irq_cfg1 = data; |
1172 | return MEMTX_OK; |
1173 | case A_EVENTQ_IRQ_CFG2: |
1174 | s->eventq_irq_cfg2 = data; |
1175 | return MEMTX_OK; |
1176 | default: |
1177 | qemu_log_mask(LOG_UNIMP, |
1178 | "%s Unexpected 32-bit access to 0x%" PRIx64" (WI)\n" , |
1179 | __func__, offset); |
1180 | return MEMTX_OK; |
1181 | } |
1182 | } |
1183 | |
1184 | static MemTxResult smmu_write_mmio(void *opaque, hwaddr offset, uint64_t data, |
1185 | unsigned size, MemTxAttrs attrs) |
1186 | { |
1187 | SMMUState *sys = opaque; |
1188 | SMMUv3State *s = ARM_SMMUV3(sys); |
1189 | MemTxResult r; |
1190 | |
1191 | /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */ |
1192 | offset &= ~0x10000; |
1193 | |
1194 | switch (size) { |
1195 | case 8: |
1196 | r = smmu_writell(s, offset, data, attrs); |
1197 | break; |
1198 | case 4: |
1199 | r = smmu_writel(s, offset, data, attrs); |
1200 | break; |
1201 | default: |
1202 | r = MEMTX_ERROR; |
1203 | break; |
1204 | } |
1205 | |
1206 | trace_smmuv3_write_mmio(offset, data, size, r); |
1207 | return r; |
1208 | } |
1209 | |
1210 | static MemTxResult smmu_readll(SMMUv3State *s, hwaddr offset, |
1211 | uint64_t *data, MemTxAttrs attrs) |
1212 | { |
1213 | switch (offset) { |
1214 | case A_GERROR_IRQ_CFG0: |
1215 | *data = s->gerror_irq_cfg0; |
1216 | return MEMTX_OK; |
1217 | case A_STRTAB_BASE: |
1218 | *data = s->strtab_base; |
1219 | return MEMTX_OK; |
1220 | case A_CMDQ_BASE: |
1221 | *data = s->cmdq.base; |
1222 | return MEMTX_OK; |
1223 | case A_EVENTQ_BASE: |
1224 | *data = s->eventq.base; |
1225 | return MEMTX_OK; |
1226 | default: |
1227 | *data = 0; |
1228 | qemu_log_mask(LOG_UNIMP, |
1229 | "%s Unexpected 64-bit access to 0x%" PRIx64" (RAZ)\n" , |
1230 | __func__, offset); |
1231 | return MEMTX_OK; |
1232 | } |
1233 | } |
1234 | |
1235 | static MemTxResult smmu_readl(SMMUv3State *s, hwaddr offset, |
1236 | uint64_t *data, MemTxAttrs attrs) |
1237 | { |
1238 | switch (offset) { |
1239 | case A_IDREGS ... A_IDREGS + 0x2f: |
1240 | *data = smmuv3_idreg(offset - A_IDREGS); |
1241 | return MEMTX_OK; |
1242 | case A_IDR0 ... A_IDR5: |
1243 | *data = s->idr[(offset - A_IDR0) / 4]; |
1244 | return MEMTX_OK; |
1245 | case A_IIDR: |
1246 | *data = s->iidr; |
1247 | return MEMTX_OK; |
1248 | case A_CR0: |
1249 | *data = s->cr[0]; |
1250 | return MEMTX_OK; |
1251 | case A_CR0ACK: |
1252 | *data = s->cr0ack; |
1253 | return MEMTX_OK; |
1254 | case A_CR1: |
1255 | *data = s->cr[1]; |
1256 | return MEMTX_OK; |
1257 | case A_CR2: |
1258 | *data = s->cr[2]; |
1259 | return MEMTX_OK; |
1260 | case A_STATUSR: |
1261 | *data = s->statusr; |
1262 | return MEMTX_OK; |
1263 | case A_IRQ_CTRL: |
1264 | case A_IRQ_CTRL_ACK: |
1265 | *data = s->irq_ctrl; |
1266 | return MEMTX_OK; |
1267 | case A_GERROR: |
1268 | *data = s->gerror; |
1269 | return MEMTX_OK; |
1270 | case A_GERRORN: |
1271 | *data = s->gerrorn; |
1272 | return MEMTX_OK; |
1273 | case A_GERROR_IRQ_CFG0: /* 64b */ |
1274 | *data = extract64(s->gerror_irq_cfg0, 0, 32); |
1275 | return MEMTX_OK; |
1276 | case A_GERROR_IRQ_CFG0 + 4: |
1277 | *data = extract64(s->gerror_irq_cfg0, 32, 32); |
1278 | return MEMTX_OK; |
1279 | case A_GERROR_IRQ_CFG1: |
1280 | *data = s->gerror_irq_cfg1; |
1281 | return MEMTX_OK; |
1282 | case A_GERROR_IRQ_CFG2: |
1283 | *data = s->gerror_irq_cfg2; |
1284 | return MEMTX_OK; |
1285 | case A_STRTAB_BASE: /* 64b */ |
1286 | *data = extract64(s->strtab_base, 0, 32); |
1287 | return MEMTX_OK; |
1288 | case A_STRTAB_BASE + 4: /* 64b */ |
1289 | *data = extract64(s->strtab_base, 32, 32); |
1290 | return MEMTX_OK; |
1291 | case A_STRTAB_BASE_CFG: |
1292 | *data = s->strtab_base_cfg; |
1293 | return MEMTX_OK; |
1294 | case A_CMDQ_BASE: /* 64b */ |
1295 | *data = extract64(s->cmdq.base, 0, 32); |
1296 | return MEMTX_OK; |
1297 | case A_CMDQ_BASE + 4: |
1298 | *data = extract64(s->cmdq.base, 32, 32); |
1299 | return MEMTX_OK; |
1300 | case A_CMDQ_PROD: |
1301 | *data = s->cmdq.prod; |
1302 | return MEMTX_OK; |
1303 | case A_CMDQ_CONS: |
1304 | *data = s->cmdq.cons; |
1305 | return MEMTX_OK; |
1306 | case A_EVENTQ_BASE: /* 64b */ |
1307 | *data = extract64(s->eventq.base, 0, 32); |
1308 | return MEMTX_OK; |
1309 | case A_EVENTQ_BASE + 4: /* 64b */ |
1310 | *data = extract64(s->eventq.base, 32, 32); |
1311 | return MEMTX_OK; |
1312 | case A_EVENTQ_PROD: |
1313 | *data = s->eventq.prod; |
1314 | return MEMTX_OK; |
1315 | case A_EVENTQ_CONS: |
1316 | *data = s->eventq.cons; |
1317 | return MEMTX_OK; |
1318 | default: |
1319 | *data = 0; |
1320 | qemu_log_mask(LOG_UNIMP, |
1321 | "%s unhandled 32-bit access at 0x%" PRIx64" (RAZ)\n" , |
1322 | __func__, offset); |
1323 | return MEMTX_OK; |
1324 | } |
1325 | } |
1326 | |
1327 | static MemTxResult smmu_read_mmio(void *opaque, hwaddr offset, uint64_t *data, |
1328 | unsigned size, MemTxAttrs attrs) |
1329 | { |
1330 | SMMUState *sys = opaque; |
1331 | SMMUv3State *s = ARM_SMMUV3(sys); |
1332 | MemTxResult r; |
1333 | |
1334 | /* CONSTRAINED UNPREDICTABLE choice to have page0/1 be exact aliases */ |
1335 | offset &= ~0x10000; |
1336 | |
1337 | switch (size) { |
1338 | case 8: |
1339 | r = smmu_readll(s, offset, data, attrs); |
1340 | break; |
1341 | case 4: |
1342 | r = smmu_readl(s, offset, data, attrs); |
1343 | break; |
1344 | default: |
1345 | r = MEMTX_ERROR; |
1346 | break; |
1347 | } |
1348 | |
1349 | trace_smmuv3_read_mmio(offset, *data, size, r); |
1350 | return r; |
1351 | } |
1352 | |
1353 | static const MemoryRegionOps smmu_mem_ops = { |
1354 | .read_with_attrs = smmu_read_mmio, |
1355 | .write_with_attrs = smmu_write_mmio, |
1356 | .endianness = DEVICE_LITTLE_ENDIAN, |
1357 | .valid = { |
1358 | .min_access_size = 4, |
1359 | .max_access_size = 8, |
1360 | }, |
1361 | .impl = { |
1362 | .min_access_size = 4, |
1363 | .max_access_size = 8, |
1364 | }, |
1365 | }; |
1366 | |
1367 | static void smmu_init_irq(SMMUv3State *s, SysBusDevice *dev) |
1368 | { |
1369 | int i; |
1370 | |
1371 | for (i = 0; i < ARRAY_SIZE(s->irq); i++) { |
1372 | sysbus_init_irq(dev, &s->irq[i]); |
1373 | } |
1374 | } |
1375 | |
1376 | static void smmu_reset(DeviceState *dev) |
1377 | { |
1378 | SMMUv3State *s = ARM_SMMUV3(dev); |
1379 | SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); |
1380 | |
1381 | c->parent_reset(dev); |
1382 | |
1383 | smmuv3_init_regs(s); |
1384 | } |
1385 | |
1386 | static void smmu_realize(DeviceState *d, Error **errp) |
1387 | { |
1388 | SMMUState *sys = ARM_SMMU(d); |
1389 | SMMUv3State *s = ARM_SMMUV3(sys); |
1390 | SMMUv3Class *c = ARM_SMMUV3_GET_CLASS(s); |
1391 | SysBusDevice *dev = SYS_BUS_DEVICE(d); |
1392 | Error *local_err = NULL; |
1393 | |
1394 | c->parent_realize(d, &local_err); |
1395 | if (local_err) { |
1396 | error_propagate(errp, local_err); |
1397 | return; |
1398 | } |
1399 | |
1400 | qemu_mutex_init(&s->mutex); |
1401 | |
1402 | memory_region_init_io(&sys->iomem, OBJECT(s), |
1403 | &smmu_mem_ops, sys, TYPE_ARM_SMMUV3, 0x20000); |
1404 | |
1405 | sys->mrtypename = TYPE_SMMUV3_IOMMU_MEMORY_REGION; |
1406 | |
1407 | sysbus_init_mmio(dev, &sys->iomem); |
1408 | |
1409 | smmu_init_irq(s, dev); |
1410 | } |
1411 | |
1412 | static const VMStateDescription vmstate_smmuv3_queue = { |
1413 | .name = "smmuv3_queue" , |
1414 | .version_id = 1, |
1415 | .minimum_version_id = 1, |
1416 | .fields = (VMStateField[]) { |
1417 | VMSTATE_UINT64(base, SMMUQueue), |
1418 | VMSTATE_UINT32(prod, SMMUQueue), |
1419 | VMSTATE_UINT32(cons, SMMUQueue), |
1420 | VMSTATE_UINT8(log2size, SMMUQueue), |
1421 | VMSTATE_END_OF_LIST(), |
1422 | }, |
1423 | }; |
1424 | |
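/*
 * Note: the config cache and the IOTLB are not migrated; they only
 * cache guest RAM structures and are repopulated on demand on the
 * destination.
 */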
1425 | static const VMStateDescription vmstate_smmuv3 = { |
1426 | .name = "smmuv3" , |
1427 | .version_id = 1, |
1428 | .minimum_version_id = 1, |
1429 | .fields = (VMStateField[]) { |
1430 | VMSTATE_UINT32(features, SMMUv3State), |
1431 | VMSTATE_UINT8(sid_size, SMMUv3State), |
1432 | VMSTATE_UINT8(sid_split, SMMUv3State), |
1433 | |
1434 | VMSTATE_UINT32_ARRAY(cr, SMMUv3State, 3), |
1435 | VMSTATE_UINT32(cr0ack, SMMUv3State), |
1436 | VMSTATE_UINT32(statusr, SMMUv3State), |
1437 | VMSTATE_UINT32(irq_ctrl, SMMUv3State), |
1438 | VMSTATE_UINT32(gerror, SMMUv3State), |
1439 | VMSTATE_UINT32(gerrorn, SMMUv3State), |
1440 | VMSTATE_UINT64(gerror_irq_cfg0, SMMUv3State), |
1441 | VMSTATE_UINT32(gerror_irq_cfg1, SMMUv3State), |
1442 | VMSTATE_UINT32(gerror_irq_cfg2, SMMUv3State), |
1443 | VMSTATE_UINT64(strtab_base, SMMUv3State), |
1444 | VMSTATE_UINT32(strtab_base_cfg, SMMUv3State), |
1445 | VMSTATE_UINT64(eventq_irq_cfg0, SMMUv3State), |
1446 | VMSTATE_UINT32(eventq_irq_cfg1, SMMUv3State), |
1447 | VMSTATE_UINT32(eventq_irq_cfg2, SMMUv3State), |
1448 | |
1449 | VMSTATE_STRUCT(cmdq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue), |
1450 | VMSTATE_STRUCT(eventq, SMMUv3State, 0, vmstate_smmuv3_queue, SMMUQueue), |
1451 | |
1452 | VMSTATE_END_OF_LIST(), |
1453 | }, |
1454 | }; |
1455 | |
1456 | static void smmuv3_instance_init(Object *obj) |
1457 | { |
1458 | /* Nothing much to do here as of now */ |
1459 | } |
1460 | |
1461 | static void smmuv3_class_init(ObjectClass *klass, void *data) |
1462 | { |
1463 | DeviceClass *dc = DEVICE_CLASS(klass); |
1464 | SMMUv3Class *c = ARM_SMMUV3_CLASS(klass); |
1465 | |
1466 | dc->vmsd = &vmstate_smmuv3; |
1467 | device_class_set_parent_reset(dc, smmu_reset, &c->parent_reset); |
1468 | c->parent_realize = dc->realize; |
1469 | dc->realize = smmu_realize; |
1470 | } |
1471 | |
1472 | static void smmuv3_notify_flag_changed(IOMMUMemoryRegion *iommu, |
1473 | IOMMUNotifierFlag old, |
1474 | IOMMUNotifierFlag new) |
1475 | { |
1476 | SMMUDevice *sdev = container_of(iommu, SMMUDevice, iommu); |
1477 | SMMUv3State *s3 = sdev->smmu; |
1478 | SMMUState *s = &(s3->smmu_state); |
1479 | |
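    /*
     * MAP notifiers (needed e.g. by VFIO device assignment) are not
     * supported: this model only propagates invalidations (unmap
     * notifications) to the registered notifiers.
     */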
1480 | if (new & IOMMU_NOTIFIER_MAP) { |
1481 | int bus_num = pci_bus_num(sdev->bus); |
1482 | PCIDevice *pcidev = pci_find_device(sdev->bus, bus_num, sdev->devfn); |
1483 | |
1484 | warn_report("SMMUv3 does not support notification on MAP: " |
1485 | "device %s will not function properly" , pcidev->name); |
1486 | } |
1487 | |
1488 | if (old == IOMMU_NOTIFIER_NONE) { |
1489 | trace_smmuv3_notify_flag_add(iommu->parent_obj.name); |
1490 | QLIST_INSERT_HEAD(&s->devices_with_notifiers, sdev, next); |
1491 | } else if (new == IOMMU_NOTIFIER_NONE) { |
1492 | trace_smmuv3_notify_flag_del(iommu->parent_obj.name); |
1493 | QLIST_REMOVE(sdev, next); |
1494 | } |
1495 | } |
1496 | |
1497 | static void smmuv3_iommu_memory_region_class_init(ObjectClass *klass, |
1498 | void *data) |
1499 | { |
1500 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); |
1501 | |
1502 | imrc->translate = smmuv3_translate; |
1503 | imrc->notify_flag_changed = smmuv3_notify_flag_changed; |
1504 | } |
1505 | |
1506 | static const TypeInfo smmuv3_type_info = { |
1507 | .name = TYPE_ARM_SMMUV3, |
1508 | .parent = TYPE_ARM_SMMU, |
1509 | .instance_size = sizeof(SMMUv3State), |
1510 | .instance_init = smmuv3_instance_init, |
1511 | .class_size = sizeof(SMMUv3Class), |
1512 | .class_init = smmuv3_class_init, |
1513 | }; |
1514 | |
1515 | static const TypeInfo smmuv3_iommu_memory_region_info = { |
1516 | .parent = TYPE_IOMMU_MEMORY_REGION, |
1517 | .name = TYPE_SMMUV3_IOMMU_MEMORY_REGION, |
1518 | .class_init = smmuv3_iommu_memory_region_class_init, |
1519 | }; |
1520 | |
1521 | static void smmuv3_register_types(void) |
1522 | { |
1523 | type_register(&smmuv3_type_info); |
1524 | type_register(&smmuv3_iommu_memory_region_info); |
1525 | } |
1526 | |
1527 | type_init(smmuv3_register_types) |